xref: /linux/drivers/md/dm-table.c (revision d524dac9279b6a41ffdf7ff7958c577f2e387db6)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <asm/atomic.h>
21 
22 #define DM_MSG_PREFIX "table"
23 
24 #define MAX_DEPTH 16
25 #define NODE_SIZE L1_CACHE_BYTES
26 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
27 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
28 
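/*
 * Illustrative numbers (an assumption, not asserted by this file): on an
 * architecture with a 64-byte L1 cache line and an 8-byte sector_t,
 * NODE_SIZE is 64, KEYS_PER_NODE is 64 / 8 = 8 and CHILDREN_PER_NODE is 9,
 * so each btree node fits in a single cache line and a MAX_DEPTH of 16
 * covers far more targets than any real table will contain.
 */
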
29 /*
30  * The table always has exactly one reference, from either mapped_device->map
31  * or hash_cell->new_map. This reference is not counted in table->holders.
32  * A pair of dm_table_create()/dm_table_destroy() functions is used for table
33  * creation/destruction.
34  *
35  * Temporary references from the other code increase table->holders. A pair
36  * of dm_table_get/dm_table_put functions is used to manipulate it.
37  *
38  * When the table is about to be destroyed, we wait for table->holders to
39  * drop to zero.
40  */
41 
42 struct dm_table {
43 	struct mapped_device *md;
44 	atomic_t holders;
45 	unsigned type;
46 
47 	/* btree table */
48 	unsigned int depth;
49 	unsigned int counts[MAX_DEPTH];	/* in nodes */
50 	sector_t *index[MAX_DEPTH];
51 
52 	unsigned int num_targets;
53 	unsigned int num_allocated;
54 	sector_t *highs;
55 	struct dm_target *targets;
56 
57 	unsigned discards_supported:1;
58 
59 	/*
60 	 * Indicates the rw permissions for the new logical
61 	 * device.  This should be a combination of FMODE_READ
62 	 * and FMODE_WRITE.
63 	 */
64 	fmode_t mode;
65 
66 	/* a list of devices used by this table */
67 	struct list_head devices;
68 
69 	/* events get handed up using this callback */
70 	void (*event_fn)(void *);
71 	void *event_context;
72 
73 	struct dm_md_mempools *mempools;
74 
75 	struct list_head target_callbacks;
76 };
77 
78 /*
79  * Similar to ceiling(log_base(n))
80  */
81 static unsigned int int_log(unsigned int n, unsigned int base)
82 {
83 	int result = 0;
84 
85 	while (n > 1) {
86 		n = dm_div_up(n, base);
87 		result++;
88 	}
89 
90 	return result;
91 }
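
/*
 * Worked example (illustrative): int_log(1000, 10) divides 1000 -> 100 ->
 * 10 -> 1 in three rounds and returns 3 == ceil(log10(1000)); because
 * dm_div_up() rounds up at every step the result is never smaller than the
 * true ceiling.  dm_table_build_index() below uses this to size the depth
 * of the btree.
 */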
92 
93 /*
94  * Calculate the index of the child node of the n'th node's k'th key.
95  */
96 static inline unsigned int get_child(unsigned int n, unsigned int k)
97 {
98 	return (n * CHILDREN_PER_NODE) + k;
99 }
100 
101 /*
102  * Return the n'th node of level l from table t.
103  */
104 static inline sector_t *get_node(struct dm_table *t,
105 				 unsigned int l, unsigned int n)
106 {
107 	return t->index[l] + (n * KEYS_PER_NODE);
108 }
109 
110 /*
111  * Return the highest key that you could look up from the n'th
112  * node on level l of the btree.
113  */
114 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
115 {
116 	for (; l < t->depth - 1; l++)
117 		n = get_child(n, CHILDREN_PER_NODE - 1);
118 
119 	if (n >= t->counts[l])
120 		return (sector_t) - 1;
121 
122 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
123 }
124 
125 /*
126  * Fills in a level of the btree based on the highs of the level
127  * below it.
128  */
129 static int setup_btree_index(unsigned int l, struct dm_table *t)
130 {
131 	unsigned int n, k;
132 	sector_t *node;
133 
134 	for (n = 0U; n < t->counts[l]; n++) {
135 		node = get_node(t, l, n);
136 
137 		for (k = 0U; k < KEYS_PER_NODE; k++)
138 			node[k] = high(t, l + 1, get_child(n, k));
139 	}
140 
141 	return 0;
142 }
143 
144 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
145 {
146 	unsigned long size;
147 	void *addr;
148 
149 	/*
150 	 * Check that we're not going to overflow.
151 	 */
152 	if (nmemb > (ULONG_MAX / elem_size))
153 		return NULL;
154 
155 	size = nmemb * elem_size;
156 	addr = vmalloc(size);
157 	if (addr)
158 		memset(addr, 0, size);
159 
160 	return addr;
161 }
162 
163 /*
164  * highs and targets are managed as dynamic arrays during a
165  * table load.
166  */
167 static int alloc_targets(struct dm_table *t, unsigned int num)
168 {
169 	sector_t *n_highs;
170 	struct dm_target *n_targets;
171 	int n = t->num_targets;
172 
173 	/*
174 	 * Allocate both the target array and offset array at once.
175 	 * Append an empty entry to catch sectors beyond the end of
176 	 * the device.
177 	 */
178 	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
179 					  sizeof(sector_t));
180 	if (!n_highs)
181 		return -ENOMEM;
182 
183 	n_targets = (struct dm_target *) (n_highs + num);
184 
185 	if (n) {
186 		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
187 		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
188 	}
189 
190 	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
191 	vfree(t->highs);
192 
193 	t->num_allocated = num;
194 	t->highs = n_highs;
195 	t->targets = n_targets;
196 
197 	return 0;
198 }
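
/*
 * Rough sketch of the single block allocated above (layout only, sizes not
 * to scale): the first num sector_t slots are the "highs" array and the
 * targets array starts immediately after it, with one entry's worth of
 * spare space coming from the "+ 1":
 *
 *	n_highs                 n_targets = (struct dm_target *)(n_highs + num)
 *	|                       |
 *	v                       v
 *	[ sector_t x num ][ struct dm_target x num ][ spare from the +1 entry ]
 *
 * highs[i] caches the last sector mapped by targets[i] (filled in by
 * dm_table_add_target()); slots not yet populated are pre-set to
 * (sector_t) -1 by the memset() above.
 */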
199 
200 int dm_table_create(struct dm_table **result, fmode_t mode,
201 		    unsigned num_targets, struct mapped_device *md)
202 {
203 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
204 
205 	if (!t)
206 		return -ENOMEM;
207 
208 	INIT_LIST_HEAD(&t->devices);
209 	INIT_LIST_HEAD(&t->target_callbacks);
210 	atomic_set(&t->holders, 0);
211 	t->discards_supported = 1;
212 
213 	if (!num_targets)
214 		num_targets = KEYS_PER_NODE;
215 
216 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
217 
218 	if (alloc_targets(t, num_targets)) {
219 		kfree(t);
220 		t = NULL;
221 		return -ENOMEM;
222 	}
223 
224 	t->mode = mode;
225 	t->md = md;
226 	*result = t;
227 	return 0;
228 }
229 
230 static void free_devices(struct list_head *devices)
231 {
232 	struct list_head *tmp, *next;
233 
234 	list_for_each_safe(tmp, next, devices) {
235 		struct dm_dev_internal *dd =
236 		    list_entry(tmp, struct dm_dev_internal, list);
237 		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
238 		       dd->dm_dev.name);
239 		kfree(dd);
240 	}
241 }
242 
243 void dm_table_destroy(struct dm_table *t)
244 {
245 	unsigned int i;
246 
247 	if (!t)
248 		return;
249 
250 	while (atomic_read(&t->holders))
251 		msleep(1);
252 	smp_mb();
253 
254 	/* free the indexes */
255 	if (t->depth >= 2)
256 		vfree(t->index[t->depth - 2]);
257 
258 	/* free the targets */
259 	for (i = 0; i < t->num_targets; i++) {
260 		struct dm_target *tgt = t->targets + i;
261 
262 		if (tgt->type->dtr)
263 			tgt->type->dtr(tgt);
264 
265 		dm_put_target_type(tgt->type);
266 	}
267 
268 	vfree(t->highs);
269 
270 	/* free the device list */
271 	if (t->devices.next != &t->devices)
272 		free_devices(&t->devices);
273 
274 	dm_free_md_mempools(t->mempools);
275 
276 	kfree(t);
277 }
278 
279 void dm_table_get(struct dm_table *t)
280 {
281 	atomic_inc(&t->holders);
282 }
283 
284 void dm_table_put(struct dm_table *t)
285 {
286 	if (!t)
287 		return;
288 
289 	smp_mb__before_atomic_dec();
290 	atomic_dec(&t->holders);
291 }
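
/*
 * Illustrative usage (a sketch, not code from this file): code that borrows
 * a table for a short time brackets the access so that dm_table_destroy()
 * will wait for it to finish:
 *
 *	dm_table_get(t);
 *	... read-only use of t, e.g. dm_table_get_mode(t) ...
 *	dm_table_put(t);
 */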
292 
293 /*
294  * Checks to see if we need to extend highs or targets.
295  */
296 static inline int check_space(struct dm_table *t)
297 {
298 	if (t->num_targets >= t->num_allocated)
299 		return alloc_targets(t, t->num_allocated * 2);
300 
301 	return 0;
302 }
303 
304 /*
305  * See if we've already got a device in the list.
306  */
307 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
308 {
309 	struct dm_dev_internal *dd;
310 
311 	list_for_each_entry (dd, l, list)
312 		if (dd->dm_dev.bdev->bd_dev == dev)
313 			return dd;
314 
315 	return NULL;
316 }
317 
318 /*
319  * Open a device so we can use it as a map destination.
320  */
321 static int open_dev(struct dm_dev_internal *d, dev_t dev,
322 		    struct mapped_device *md)
323 {
324 	static char *_claim_ptr = "I belong to device-mapper";
325 	struct block_device *bdev;
326 
327 	int r;
328 
329 	BUG_ON(d->dm_dev.bdev);
330 
331 	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
332 	if (IS_ERR(bdev))
333 		return PTR_ERR(bdev);
334 
335 	r = bd_link_disk_holder(bdev, dm_disk(md));
336 	if (r) {
337 		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
338 		return r;
339 	}
340 
341 	d->dm_dev.bdev = bdev;
342 	return 0;
343 }
344 
345 /*
346  * Close a device that we've been using.
347  */
348 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
349 {
350 	if (!d->dm_dev.bdev)
351 		return;
352 
353 	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
354 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
355 	d->dm_dev.bdev = NULL;
356 }
357 
358 /*
359  * If possible, this checks whether an area of a destination device is invalid.
360  */
361 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
362 				  sector_t start, sector_t len, void *data)
363 {
364 	struct queue_limits *limits = data;
365 	struct block_device *bdev = dev->bdev;
366 	sector_t dev_size =
367 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
368 	unsigned short logical_block_size_sectors =
369 		limits->logical_block_size >> SECTOR_SHIFT;
370 	char b[BDEVNAME_SIZE];
371 
372 	if (!dev_size)
373 		return 0;
374 
375 	if ((start >= dev_size) || (start + len > dev_size)) {
376 		DMWARN("%s: %s too small for target: "
377 		       "start=%llu, len=%llu, dev_size=%llu",
378 		       dm_device_name(ti->table->md), bdevname(bdev, b),
379 		       (unsigned long long)start,
380 		       (unsigned long long)len,
381 		       (unsigned long long)dev_size);
382 		return 1;
383 	}
384 
385 	if (logical_block_size_sectors <= 1)
386 		return 0;
387 
388 	if (start & (logical_block_size_sectors - 1)) {
389 		DMWARN("%s: start=%llu not aligned to h/w "
390 		       "logical block size %u of %s",
391 		       dm_device_name(ti->table->md),
392 		       (unsigned long long)start,
393 		       limits->logical_block_size, bdevname(bdev, b));
394 		return 1;
395 	}
396 
397 	if (len & (logical_block_size_sectors - 1)) {
398 		DMWARN("%s: len=%llu not aligned to h/w "
399 		       "logical block size %u of %s",
400 		       dm_device_name(ti->table->md),
401 		       (unsigned long long)len,
402 		       limits->logical_block_size, bdevname(bdev, b));
403 		return 1;
404 	}
405 
406 	return 0;
407 }
408 
409 /*
410  * This upgrades the mode on an already open dm_dev, being
411  * careful to leave things as they were if we fail to reopen the
412  * device and not to touch the existing bdev field in case
413  * it is accessed concurrently inside dm_table_any_congested().
414  */
415 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
416 			struct mapped_device *md)
417 {
418 	int r;
419 	struct dm_dev_internal dd_new, dd_old;
420 
421 	dd_new = dd_old = *dd;
422 
423 	dd_new.dm_dev.mode |= new_mode;
424 	dd_new.dm_dev.bdev = NULL;
425 
426 	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
427 	if (r)
428 		return r;
429 
430 	dd->dm_dev.mode |= new_mode;
431 	close_dev(&dd_old, md);
432 
433 	return 0;
434 }
435 
436 /*
437  * Add a device to the list, or just increment the usage count if
438  * it's already present.
439  */
440 static int __table_get_device(struct dm_table *t, struct dm_target *ti,
441 		      const char *path, fmode_t mode, struct dm_dev **result)
442 {
443 	int r;
444 	dev_t uninitialized_var(dev);
445 	struct dm_dev_internal *dd;
446 	unsigned int major, minor;
447 
448 	BUG_ON(!t);
449 
450 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
451 		/* Extract the major/minor numbers */
452 		dev = MKDEV(major, minor);
453 		if (MAJOR(dev) != major || MINOR(dev) != minor)
454 			return -EOVERFLOW;
455 	} else {
456 		/* convert the path to a device */
457 		struct block_device *bdev = lookup_bdev(path);
458 
459 		if (IS_ERR(bdev))
460 			return PTR_ERR(bdev);
461 		dev = bdev->bd_dev;
462 		bdput(bdev);
463 	}
464 
465 	dd = find_device(&t->devices, dev);
466 	if (!dd) {
467 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
468 		if (!dd)
469 			return -ENOMEM;
470 
471 		dd->dm_dev.mode = mode;
472 		dd->dm_dev.bdev = NULL;
473 
474 		if ((r = open_dev(dd, dev, t->md))) {
475 			kfree(dd);
476 			return r;
477 		}
478 
479 		format_dev_t(dd->dm_dev.name, dev);
480 
481 		atomic_set(&dd->count, 0);
482 		list_add(&dd->list, &t->devices);
483 
484 	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
485 		r = upgrade_mode(dd, mode, t->md);
486 		if (r)
487 			return r;
488 	}
489 	atomic_inc(&dd->count);
490 
491 	*result = &dd->dm_dev;
492 	return 0;
493 }
494 
495 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
496 			 sector_t start, sector_t len, void *data)
497 {
498 	struct queue_limits *limits = data;
499 	struct block_device *bdev = dev->bdev;
500 	struct request_queue *q = bdev_get_queue(bdev);
501 	char b[BDEVNAME_SIZE];
502 
503 	if (unlikely(!q)) {
504 		DMWARN("%s: Cannot set limits for nonexistent device %s",
505 		       dm_device_name(ti->table->md), bdevname(bdev, b));
506 		return 0;
507 	}
508 
509 	if (bdev_stack_limits(limits, bdev, start) < 0)
510 		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
511 		       "physical_block_size=%u, logical_block_size=%u, "
512 		       "alignment_offset=%u, start=%llu",
513 		       dm_device_name(ti->table->md), bdevname(bdev, b),
514 		       q->limits.physical_block_size,
515 		       q->limits.logical_block_size,
516 		       q->limits.alignment_offset,
517 		       (unsigned long long) start << SECTOR_SHIFT);
518 
519 	/*
520 	 * Check if merge fn is supported.
521 	 * If not, we'll force DM to use PAGE_SIZE or
522 	 * smaller I/O, just to be safe.
523 	 */
524 
525 	if (q->merge_bvec_fn && !ti->type->merge)
526 		blk_limits_max_hw_sectors(limits,
527 					  (unsigned int) (PAGE_SIZE >> 9));
528 	return 0;
529 }
530 EXPORT_SYMBOL_GPL(dm_set_device_limits);
531 
532 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
533 		  struct dm_dev **result)
534 {
535 	return __table_get_device(ti->table, ti, path, mode, result);
536 }
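
/*
 * Illustrative usage (hypothetical target, not code from this file): a
 * target constructor typically resolves its backing device like this and
 * releases it again from its destructor with dm_put_device():
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &dev)) {
 *			ti->error = "Device lookup failed";
 *			return -EINVAL;
 *		}
 *		...
 *	}
 */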
537 
538 
539 /*
540  * Decrement a device's use count and remove it if necessary.
541  */
542 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
543 {
544 	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
545 						  dm_dev);
546 
547 	if (atomic_dec_and_test(&dd->count)) {
548 		close_dev(dd, ti->table->md);
549 		list_del(&dd->list);
550 		kfree(dd);
551 	}
552 }
553 
554 /*
555  * Checks to see if the target joins onto the end of the table.
556  */
557 static int adjoin(struct dm_table *table, struct dm_target *ti)
558 {
559 	struct dm_target *prev;
560 
561 	if (!table->num_targets)
562 		return !ti->begin;
563 
564 	prev = &table->targets[table->num_targets - 1];
565 	return (ti->begin == (prev->begin + prev->len));
566 }
567 
568 /*
569  * Used to dynamically allocate the arg array.
570  */
571 static char **realloc_argv(unsigned *array_size, char **old_argv)
572 {
573 	char **argv;
574 	unsigned new_size;
575 
576 	new_size = *array_size ? *array_size * 2 : 64;
577 	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
578 	if (argv) {
579 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
580 		*array_size = new_size;
581 	}
582 
583 	kfree(old_argv);
584 	return argv;
585 }
586 
587 /*
588  * Destructively splits up the argument list to pass to ctr.
589  */
590 int dm_split_args(int *argc, char ***argvp, char *input)
591 {
592 	char *start, *end = input, *out, **argv = NULL;
593 	unsigned array_size = 0;
594 
595 	*argc = 0;
596 
597 	if (!input) {
598 		*argvp = NULL;
599 		return 0;
600 	}
601 
602 	argv = realloc_argv(&array_size, argv);
603 	if (!argv)
604 		return -ENOMEM;
605 
606 	while (1) {
607 		/* Skip whitespace */
608 		start = skip_spaces(end);
609 
610 		if (!*start)
611 			break;	/* success, we hit the end */
612 
613 		/* 'out' is used to remove any backslash escapes */
614 		end = out = start;
615 		while (*end) {
616 			/* Everything apart from '\0' can be quoted */
617 			if (*end == '\\' && *(end + 1)) {
618 				*out++ = *(end + 1);
619 				end += 2;
620 				continue;
621 			}
622 
623 			if (isspace(*end))
624 				break;	/* end of token */
625 
626 			*out++ = *end++;
627 		}
628 
629 		/* have we already filled the array ? */
630 		if ((*argc + 1) > array_size) {
631 			argv = realloc_argv(&array_size, argv);
632 			if (!argv)
633 				return -ENOMEM;
634 		}
635 
636 		/* we know this is whitespace */
637 		if (*end)
638 			end++;
639 
640 		/* terminate the string and put it in the array */
641 		*out = '\0';
642 		argv[*argc] = start;
643 		(*argc)++;
644 	}
645 
646 	*argvp = argv;
647 	return 0;
648 }
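
/*
 * Worked example (illustrative): given the writable string
 * "linear /dev/mapper/a\ b 0", dm_split_args() yields argc == 3 with
 * argv[0] = "linear", argv[1] = "/dev/mapper/a b" (the backslash escape is
 * removed in place) and argv[2] = "0"; the caller kfree()s argv when done,
 * as dm_table_add_target() does below.
 */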
649 
650 /*
651  * Impose necessary and sufficient conditions on a device's table such
652  * that any incoming bio which respects its logical_block_size can be
653  * processed successfully.  If it falls across the boundary between
654  * two or more targets, the size of each piece it gets split into must
655  * be compatible with the logical_block_size of the target processing it.
656  */
657 static int validate_hardware_logical_block_alignment(struct dm_table *table,
658 						 struct queue_limits *limits)
659 {
660 	/*
661 	 * This function uses arithmetic modulo the logical_block_size
662 	 * (in units of 512-byte sectors).
663 	 */
664 	unsigned short device_logical_block_size_sects =
665 		limits->logical_block_size >> SECTOR_SHIFT;
666 
667 	/*
668 	 * Offset of the start of the next table entry, mod logical_block_size.
669 	 */
670 	unsigned short next_target_start = 0;
671 
672 	/*
673 	 * Given an aligned bio that extends beyond the end of a
674 	 * target, how many sectors must the next target handle?
675 	 */
676 	unsigned short remaining = 0;
677 
678 	struct dm_target *uninitialized_var(ti);
679 	struct queue_limits ti_limits;
680 	unsigned i = 0;
681 
682 	/*
683 	 * Check each entry in the table in turn.
684 	 */
685 	while (i < dm_table_get_num_targets(table)) {
686 		ti = dm_table_get_target(table, i++);
687 
688 		blk_set_default_limits(&ti_limits);
689 
690 		/* combine all target devices' limits */
691 		if (ti->type->iterate_devices)
692 			ti->type->iterate_devices(ti, dm_set_device_limits,
693 						  &ti_limits);
694 
695 		/*
696 		 * If the remaining sectors fall entirely within this
697 		 * table entry are they compatible with its logical_block_size?
698 		 */
699 		if (remaining < ti->len &&
700 		    remaining & ((ti_limits.logical_block_size >>
701 				  SECTOR_SHIFT) - 1))
702 			break;	/* Error */
703 
704 		next_target_start =
705 		    (unsigned short) ((next_target_start + ti->len) &
706 				      (device_logical_block_size_sects - 1));
707 		remaining = next_target_start ?
708 		    device_logical_block_size_sects - next_target_start : 0;
709 	}
710 
711 	if (remaining) {
712 		DMWARN("%s: table line %u (start sect %llu len %llu) "
713 		       "not aligned to h/w logical block size %u",
714 		       dm_device_name(table->md), i,
715 		       (unsigned long long) ti->begin,
716 		       (unsigned long long) ti->len,
717 		       limits->logical_block_size);
718 		return -EINVAL;
719 	}
720 
721 	return 0;
722 }
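
/*
 * Worked example (illustrative numbers): with a table-wide 4096-byte
 * logical block size (8 sectors), a first target of 1001 sectors leaves
 * next_target_start = 1001 & 7 = 1 and remaining = 7.  If the following
 * target (longer than those 7 sectors) also sits on 4096-byte-sector
 * devices, 7 & 7 != 0, so the loop breaks and the table is rejected with
 * -EINVAL; with 512-byte-sector devices (1 sector) the 7-sector head is
 * acceptable and remaining is recomputed at the next boundary.
 */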
723 
724 int dm_table_add_target(struct dm_table *t, const char *type,
725 			sector_t start, sector_t len, char *params)
726 {
727 	int r = -EINVAL, argc;
728 	char **argv;
729 	struct dm_target *tgt;
730 
731 	if ((r = check_space(t)))
732 		return r;
733 
734 	tgt = t->targets + t->num_targets;
735 	memset(tgt, 0, sizeof(*tgt));
736 
737 	if (!len) {
738 		DMERR("%s: zero-length target", dm_device_name(t->md));
739 		return -EINVAL;
740 	}
741 
742 	tgt->type = dm_get_target_type(type);
743 	if (!tgt->type) {
744 		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
745 		      type);
746 		return -EINVAL;
747 	}
748 
749 	tgt->table = t;
750 	tgt->begin = start;
751 	tgt->len = len;
752 	tgt->error = "Unknown error";
753 
754 	/*
755 	 * Does this target adjoin the previous one?
756 	 */
757 	if (!adjoin(t, tgt)) {
758 		tgt->error = "Gap in table";
759 		r = -EINVAL;
760 		goto bad;
761 	}
762 
763 	r = dm_split_args(&argc, &argv, params);
764 	if (r) {
765 		tgt->error = "couldn't split parameters (insufficient memory)";
766 		goto bad;
767 	}
768 
769 	r = tgt->type->ctr(tgt, argc, argv);
770 	kfree(argv);
771 	if (r)
772 		goto bad;
773 
774 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
775 
776 	if (!tgt->num_discard_requests)
777 		t->discards_supported = 0;
778 
779 	return 0;
780 
781  bad:
782 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
783 	dm_put_target_type(tgt->type);
784 	return r;
785 }
786 
787 static int dm_table_set_type(struct dm_table *t)
788 {
789 	unsigned i;
790 	unsigned bio_based = 0, request_based = 0;
791 	struct dm_target *tgt;
792 	struct dm_dev_internal *dd;
793 	struct list_head *devices;
794 
795 	for (i = 0; i < t->num_targets; i++) {
796 		tgt = t->targets + i;
797 		if (dm_target_request_based(tgt))
798 			request_based = 1;
799 		else
800 			bio_based = 1;
801 
802 		if (bio_based && request_based) {
803 			DMWARN("Inconsistent table: different target types"
804 			       " can't be mixed up");
805 			return -EINVAL;
806 		}
807 	}
808 
809 	if (bio_based) {
810 		/* We must use this table as bio-based */
811 		t->type = DM_TYPE_BIO_BASED;
812 		return 0;
813 	}
814 
815 	BUG_ON(!request_based); /* No targets in this table */
816 
817 	/* Non-request-stackable devices can't be used for request-based dm */
818 	devices = dm_table_get_devices(t);
819 	list_for_each_entry(dd, devices, list) {
820 		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
821 			DMWARN("table load rejected: including"
822 			       " non-request-stackable devices");
823 			return -EINVAL;
824 		}
825 	}
826 
827 	/*
828 	 * Request-based dm currently supports only tables that have a single target.
829 	 * To support multiple targets, request splitting support is needed,
830 	 * and that needs lots of changes in the block-layer.
831 	 * (e.g. request completion process for partial completion.)
832 	 */
833 	if (t->num_targets > 1) {
834 		DMWARN("Request-based dm doesn't support multiple targets yet");
835 		return -EINVAL;
836 	}
837 
838 	t->type = DM_TYPE_REQUEST_BASED;
839 
840 	return 0;
841 }
842 
843 unsigned dm_table_get_type(struct dm_table *t)
844 {
845 	return t->type;
846 }
847 
848 bool dm_table_request_based(struct dm_table *t)
849 {
850 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
851 }
852 
853 int dm_table_alloc_md_mempools(struct dm_table *t)
854 {
855 	unsigned type = dm_table_get_type(t);
856 
857 	if (unlikely(type == DM_TYPE_NONE)) {
858 		DMWARN("no table type is set, can't allocate mempools");
859 		return -EINVAL;
860 	}
861 
862 	t->mempools = dm_alloc_md_mempools(type);
863 	if (!t->mempools)
864 		return -ENOMEM;
865 
866 	return 0;
867 }
868 
869 void dm_table_free_md_mempools(struct dm_table *t)
870 {
871 	dm_free_md_mempools(t->mempools);
872 	t->mempools = NULL;
873 }
874 
875 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
876 {
877 	return t->mempools;
878 }
879 
880 static int setup_indexes(struct dm_table *t)
881 {
882 	int i;
883 	unsigned int total = 0;
884 	sector_t *indexes;
885 
886 	/* allocate the space for *all* the indexes */
887 	for (i = t->depth - 2; i >= 0; i--) {
888 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
889 		total += t->counts[i];
890 	}
891 
892 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
893 	if (!indexes)
894 		return -ENOMEM;
895 
896 	/* set up internal nodes, bottom-up */
897 	for (i = t->depth - 2; i >= 0; i--) {
898 		t->index[i] = indexes;
899 		indexes += (KEYS_PER_NODE * t->counts[i]);
900 		setup_btree_index(i, t);
901 	}
902 
903 	return 0;
904 }
905 
906 /*
907  * Builds the btree to index the map.
908  */
909 static int dm_table_build_index(struct dm_table *t)
910 {
911 	int r = 0;
912 	unsigned int leaf_nodes;
913 
914 	/* how many indexes will the btree have? */
915 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
916 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
917 
918 	/* leaf layer has already been set up */
919 	t->counts[t->depth - 1] = leaf_nodes;
920 	t->index[t->depth - 1] = t->highs;
921 
922 	if (t->depth >= 2)
923 		r = setup_indexes(t);
924 
925 	return r;
926 }
927 
928 /*
929  * Register the mapped device for blk_integrity support if
930  * the underlying devices support it.
931  */
932 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
933 {
934 	struct list_head *devices = dm_table_get_devices(t);
935 	struct dm_dev_internal *dd;
936 
937 	list_for_each_entry(dd, devices, list)
938 		if (bdev_get_integrity(dd->dm_dev.bdev))
939 			return blk_integrity_register(dm_disk(md), NULL);
940 
941 	return 0;
942 }
943 
944 /*
945  * Prepares the table for use by setting its type, building the indices,
946  * registering an integrity profile if needed, and allocating mempools.
947  */
948 int dm_table_complete(struct dm_table *t)
949 {
950 	int r;
951 
952 	r = dm_table_set_type(t);
953 	if (r) {
954 		DMERR("unable to set table type");
955 		return r;
956 	}
957 
958 	r = dm_table_build_index(t);
959 	if (r) {
960 		DMERR("unable to build btrees");
961 		return r;
962 	}
963 
964 	r = dm_table_prealloc_integrity(t, t->md);
965 	if (r) {
966 		DMERR("could not register integrity profile.");
967 		return r;
968 	}
969 
970 	r = dm_table_alloc_md_mempools(t);
971 	if (r)
972 		DMERR("unable to allocate mempools");
973 
974 	return r;
975 }
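
/*
 * Illustrative lifecycle (a sketch of how the table-load path drives this
 * file, not code from it; params must be a writable buffer because
 * dm_split_args() modifies it in place):
 *
 *	dm_table_create(&t, mode, num_targets, md);
 *	dm_table_add_target(t, "linear", 0, len, params);  /@ once per line @/
 *	dm_table_complete(t);      /@ build btree, set type, mempools @/
 *	...                        /@ table is later bound to md and, when
 *	dm_table_destroy(t);          replaced or torn down, destroyed @/
 */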
976 
977 static DEFINE_MUTEX(_event_lock);
978 void dm_table_event_callback(struct dm_table *t,
979 			     void (*fn)(void *), void *context)
980 {
981 	mutex_lock(&_event_lock);
982 	t->event_fn = fn;
983 	t->event_context = context;
984 	mutex_unlock(&_event_lock);
985 }
986 
987 void dm_table_event(struct dm_table *t)
988 {
989 	/*
990 	 * You can no longer call dm_table_event() from interrupt
991 	 * context; use a bottom half instead.
992 	 */
993 	BUG_ON(in_interrupt());
994 
995 	mutex_lock(&_event_lock);
996 	if (t->event_fn)
997 		t->event_fn(t->event_context);
998 	mutex_unlock(&_event_lock);
999 }
1000 
1001 sector_t dm_table_get_size(struct dm_table *t)
1002 {
1003 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1004 }
1005 
1006 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1007 {
1008 	if (index >= t->num_targets)
1009 		return NULL;
1010 
1011 	return t->targets + index;
1012 }
1013 
1014 /*
1015  * Search the btree for the correct target.
1016  *
1017  * Caller should check returned pointer with dm_target_is_valid()
1018  * to trap I/O beyond end of device.
1019  */
1020 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1021 {
1022 	unsigned int l, n = 0, k = 0;
1023 	sector_t *node;
1024 
1025 	for (l = 0; l < t->depth; l++) {
1026 		n = get_child(n, k);
1027 		node = get_node(t, l, n);
1028 
1029 		for (k = 0; k < KEYS_PER_NODE; k++)
1030 			if (node[k] >= sector)
1031 				break;
1032 	}
1033 
1034 	return &t->targets[(KEYS_PER_NODE * n) + k];
1035 }
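
/*
 * Illustrative lookup (no new behaviour): with two targets covering
 * sectors 0-999 and 1000-1999, highs[] begins { 999, 1999, ... } and
 * dm_table_find_target(t, 1000) walks to the leaf key that is >= 1000 and
 * returns the second target; a sector of 2000 or more runs past the last
 * real key onto an unused slot, which is why callers must check the result
 * with dm_target_is_valid().
 */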
1036 
1037 /*
1038  * Establish the new table's queue_limits and validate them.
1039  */
1040 int dm_calculate_queue_limits(struct dm_table *table,
1041 			      struct queue_limits *limits)
1042 {
1043 	struct dm_target *uninitialized_var(ti);
1044 	struct queue_limits ti_limits;
1045 	unsigned i = 0;
1046 
1047 	blk_set_default_limits(limits);
1048 
1049 	while (i < dm_table_get_num_targets(table)) {
1050 		blk_set_default_limits(&ti_limits);
1051 
1052 		ti = dm_table_get_target(table, i++);
1053 
1054 		if (!ti->type->iterate_devices)
1055 			goto combine_limits;
1056 
1057 		/*
1058 		 * Combine queue limits of all the devices this target uses.
1059 		 */
1060 		ti->type->iterate_devices(ti, dm_set_device_limits,
1061 					  &ti_limits);
1062 
1063 		/* Set I/O hints portion of queue limits */
1064 		if (ti->type->io_hints)
1065 			ti->type->io_hints(ti, &ti_limits);
1066 
1067 		/*
1068 		 * Check each device area is consistent with the target's
1069 		 * overall queue limits.
1070 		 */
1071 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1072 					      &ti_limits))
1073 			return -EINVAL;
1074 
1075 combine_limits:
1076 		/*
1077 		 * Merge this target's queue limits into the overall limits
1078 		 * for the table.
1079 		 */
1080 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1081 			DMWARN("%s: adding target device "
1082 			       "(start sect %llu len %llu) "
1083 			       "caused an alignment inconsistency",
1084 			       dm_device_name(table->md),
1085 			       (unsigned long long) ti->begin,
1086 			       (unsigned long long) ti->len);
1087 	}
1088 
1089 	return validate_hardware_logical_block_alignment(table, limits);
1090 }
1091 
1092 /*
1093  * Set the integrity profile for this device if all devices used have
1094  * matching profiles.
1095  */
1096 static void dm_table_set_integrity(struct dm_table *t)
1097 {
1098 	struct list_head *devices = dm_table_get_devices(t);
1099 	struct dm_dev_internal *prev = NULL, *dd = NULL;
1100 
1101 	if (!blk_get_integrity(dm_disk(t->md)))
1102 		return;
1103 
1104 	list_for_each_entry(dd, devices, list) {
1105 		if (prev &&
1106 		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
1107 					  dd->dm_dev.bdev->bd_disk) < 0) {
1108 			DMWARN("%s: integrity not set: %s and %s mismatch",
1109 			       dm_device_name(t->md),
1110 			       prev->dm_dev.bdev->bd_disk->disk_name,
1111 			       dd->dm_dev.bdev->bd_disk->disk_name);
1112 			goto no_integrity;
1113 		}
1114 		prev = dd;
1115 	}
1116 
1117 	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
1118 		goto no_integrity;
1119 
1120 	blk_integrity_register(dm_disk(t->md),
1121 			       bdev_get_integrity(prev->dm_dev.bdev));
1122 
1123 	return;
1124 
1125 no_integrity:
1126 	blk_integrity_register(dm_disk(t->md), NULL);
1127 
1128 	return;
1129 }
1130 
1131 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1132 			       struct queue_limits *limits)
1133 {
1134 	/*
1135 	 * Copy the table's limits to the DM device's request_queue
1136 	 */
1137 	q->limits = *limits;
1138 
1139 	if (!dm_table_supports_discards(t))
1140 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1141 	else
1142 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1143 
1144 	dm_table_set_integrity(t);
1145 
1146 	/*
1147 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1148 	 * visible to other CPUs because, once the flag is set, incoming bios
1149 	 * are processed by request-based dm, which refers to the queue
1150 	 * settings.
1151 	 * Until the flag is set, bios are passed to bio-based dm and queued to
1152 	 * md->deferred where queue settings are not needed yet.
1153 	 * Those bios are passed to request-based dm at the resume time.
1154 	 */
1155 	smp_mb();
1156 	if (dm_table_request_based(t))
1157 		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1158 }
1159 
1160 unsigned int dm_table_get_num_targets(struct dm_table *t)
1161 {
1162 	return t->num_targets;
1163 }
1164 
1165 struct list_head *dm_table_get_devices(struct dm_table *t)
1166 {
1167 	return &t->devices;
1168 }
1169 
1170 fmode_t dm_table_get_mode(struct dm_table *t)
1171 {
1172 	return t->mode;
1173 }
1174 
1175 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1176 {
1177 	int i = t->num_targets;
1178 	struct dm_target *ti = t->targets;
1179 
1180 	while (i--) {
1181 		if (postsuspend) {
1182 			if (ti->type->postsuspend)
1183 				ti->type->postsuspend(ti);
1184 		} else if (ti->type->presuspend)
1185 			ti->type->presuspend(ti);
1186 
1187 		ti++;
1188 	}
1189 }
1190 
1191 void dm_table_presuspend_targets(struct dm_table *t)
1192 {
1193 	if (!t)
1194 		return;
1195 
1196 	suspend_targets(t, 0);
1197 }
1198 
1199 void dm_table_postsuspend_targets(struct dm_table *t)
1200 {
1201 	if (!t)
1202 		return;
1203 
1204 	suspend_targets(t, 1);
1205 }
1206 
1207 int dm_table_resume_targets(struct dm_table *t)
1208 {
1209 	int i, r = 0;
1210 
1211 	for (i = 0; i < t->num_targets; i++) {
1212 		struct dm_target *ti = t->targets + i;
1213 
1214 		if (!ti->type->preresume)
1215 			continue;
1216 
1217 		r = ti->type->preresume(ti);
1218 		if (r)
1219 			return r;
1220 	}
1221 
1222 	for (i = 0; i < t->num_targets; i++) {
1223 		struct dm_target *ti = t->targets + i;
1224 
1225 		if (ti->type->resume)
1226 			ti->type->resume(ti);
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
1233 {
1234 	list_add(&cb->list, &t->target_callbacks);
1235 }
1236 EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
1237 
1238 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1239 {
1240 	struct dm_dev_internal *dd;
1241 	struct list_head *devices = dm_table_get_devices(t);
1242 	struct dm_target_callbacks *cb;
1243 	int r = 0;
1244 
1245 	list_for_each_entry(dd, devices, list) {
1246 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1247 		char b[BDEVNAME_SIZE];
1248 
1249 		if (likely(q))
1250 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1251 		else
1252 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1253 				     dm_device_name(t->md),
1254 				     bdevname(dd->dm_dev.bdev, b));
1255 	}
1256 
1257 	list_for_each_entry(cb, &t->target_callbacks, list)
1258 		if (cb->congested_fn)
1259 			r |= cb->congested_fn(cb, bdi_bits);
1260 
1261 	return r;
1262 }
1263 
1264 int dm_table_any_busy_target(struct dm_table *t)
1265 {
1266 	unsigned i;
1267 	struct dm_target *ti;
1268 
1269 	for (i = 0; i < t->num_targets; i++) {
1270 		ti = t->targets + i;
1271 		if (ti->type->busy && ti->type->busy(ti))
1272 			return 1;
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 void dm_table_unplug_all(struct dm_table *t)
1279 {
1280 	struct dm_dev_internal *dd;
1281 	struct list_head *devices = dm_table_get_devices(t);
1282 	struct dm_target_callbacks *cb;
1283 
1284 	list_for_each_entry(dd, devices, list) {
1285 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1286 		char b[BDEVNAME_SIZE];
1287 
1288 		if (likely(q))
1289 			blk_unplug(q);
1290 		else
1291 			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
1292 				     dm_device_name(t->md),
1293 				     bdevname(dd->dm_dev.bdev, b));
1294 	}
1295 
1296 	list_for_each_entry(cb, &t->target_callbacks, list)
1297 		if (cb->unplug_fn)
1298 			cb->unplug_fn(cb);
1299 }
1300 
1301 struct mapped_device *dm_table_get_md(struct dm_table *t)
1302 {
1303 	return t->md;
1304 }
1305 
1306 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1307 				  sector_t start, sector_t len, void *data)
1308 {
1309 	struct request_queue *q = bdev_get_queue(dev->bdev);
1310 
1311 	return q && blk_queue_discard(q);
1312 }
1313 
1314 bool dm_table_supports_discards(struct dm_table *t)
1315 {
1316 	struct dm_target *ti;
1317 	unsigned i = 0;
1318 
1319 	if (!t->discards_supported)
1320 		return 0;
1321 
1322 	/*
1323 	 * Check that at least one underlying device supports discards.
1324 	 * t->devices includes internal dm devices such as mirror logs,
1325 	 * so we need to use iterate_devices() here, which targets that
1326 	 * support discards must provide.
1327 	 */
1328 	while (i < dm_table_get_num_targets(t)) {
1329 		ti = dm_table_get_target(t, i++);
1330 
1331 		if (ti->type->iterate_devices &&
1332 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
1333 			return 1;
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 EXPORT_SYMBOL(dm_vcalloc);
1340 EXPORT_SYMBOL(dm_get_device);
1341 EXPORT_SYMBOL(dm_put_device);
1342 EXPORT_SYMBOL(dm_table_event);
1343 EXPORT_SYMBOL(dm_table_get_size);
1344 EXPORT_SYMBOL(dm_table_get_mode);
1345 EXPORT_SYMBOL(dm_table_get_md);
1346 EXPORT_SYMBOL(dm_table_put);
1347 EXPORT_SYMBOL(dm_table_get);
1348 EXPORT_SYMBOL(dm_table_unplug_all);
1349