xref: /linux/drivers/md/dm-table.c (revision cb299ba8b5ef2239429484072fea394cd7581bd7)
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <asm/atomic.h>
21 
22 #define DM_MSG_PREFIX "table"
23 
24 #define MAX_DEPTH 16
25 #define NODE_SIZE L1_CACHE_BYTES
26 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
27 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
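/*
 * Illustrative geometry (the actual values depend on the architecture):
 * with a 64-byte L1 cache line and an 8-byte sector_t, NODE_SIZE is 64,
 * KEYS_PER_NODE is 8 and CHILDREN_PER_NODE is 9, so every btree node
 * fits in a single cache line and fans out nine ways.
 */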
28 
29 /*
30  * The table always has exactly one reference from either mapped_device->map
31  * or hash_cell->new_map. This reference is not counted in table->holders.
32  * A pair of dm_table_create/dm_table_destroy functions is used for table
33  * creation/destruction.
34  *
35  * Temporary references from other code increase table->holders. A pair
36  * of dm_table_get/dm_table_put functions is used to manipulate it.
37  *
38  * When the table is about to be destroyed, we wait for table->holders to
39  * drop to zero.
40  */
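/*
 * An illustrative sketch of such a temporary reference (taken and
 * dropped around a short, read-only use of the table):
 *
 *	dm_table_get(t);
 *	... inspect the table ...
 *	dm_table_put(t);
 */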
41 
42 struct dm_table {
43 	struct mapped_device *md;
44 	atomic_t holders;
45 	unsigned type;
46 
47 	/* btree table */
48 	unsigned int depth;
49 	unsigned int counts[MAX_DEPTH];	/* in nodes */
50 	sector_t *index[MAX_DEPTH];
51 
52 	unsigned int num_targets;
53 	unsigned int num_allocated;
54 	sector_t *highs;
55 	struct dm_target *targets;
56 
57 	unsigned discards_supported:1;
58 
59 	/*
60 	 * Indicates the rw permissions for the new logical
61 	 * device.  This should be a combination of FMODE_READ
62 	 * and FMODE_WRITE.
63 	 */
64 	fmode_t mode;
65 
66 	/* a list of devices used by this table */
67 	struct list_head devices;
68 
69 	/* events get handed up using this callback */
70 	void (*event_fn)(void *);
71 	void *event_context;
72 
73 	struct dm_md_mempools *mempools;
74 };
75 
76 /*
77  * Similar to ceiling(log_base(n))
78  */
79 static unsigned int int_log(unsigned int n, unsigned int base)
80 {
81 	int result = 0;
82 
83 	while (n > 1) {
84 		n = dm_div_up(n, base);
85 		result++;
86 	}
87 
88 	return result;
89 }
90 
91 /*
92  * Calculate the index of the child node for the k'th key of the n'th node.
93  */
94 static inline unsigned int get_child(unsigned int n, unsigned int k)
95 {
96 	return (n * CHILDREN_PER_NODE) + k;
97 }
98 
99 /*
100  * Return the n'th node of level l from table t.
101  */
102 static inline sector_t *get_node(struct dm_table *t,
103 				 unsigned int l, unsigned int n)
104 {
105 	return t->index[l] + (n * KEYS_PER_NODE);
106 }
107 
108 /*
109  * Return the highest key that you could look up from the n'th
110  * node on level l of the btree.
111  */
112 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
113 {
114 	for (; l < t->depth - 1; l++)
115 		n = get_child(n, CHILDREN_PER_NODE - 1);
116 
117 	if (n >= t->counts[l])
118 		return (sector_t) - 1;
119 
120 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
121 }
122 
123 /*
124  * Fills in a level of the btree based on the highs of the level
125  * below it.
126  */
127 static int setup_btree_index(unsigned int l, struct dm_table *t)
128 {
129 	unsigned int n, k;
130 	sector_t *node;
131 
132 	for (n = 0U; n < t->counts[l]; n++) {
133 		node = get_node(t, l, n);
134 
135 		for (k = 0U; k < KEYS_PER_NODE; k++)
136 			node[k] = high(t, l + 1, get_child(n, k));
137 	}
138 
139 	return 0;
140 }
141 
142 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
143 {
144 	unsigned long size;
145 	void *addr;
146 
147 	/*
148 	 * Check that we're not going to overflow.
149 	 */
150 	if (nmemb > (ULONG_MAX / elem_size))
151 		return NULL;
152 
153 	size = nmemb * elem_size;
154 	addr = vmalloc(size);
155 	if (addr)
156 		memset(addr, 0, size);
157 
158 	return addr;
159 }
160 
161 /*
162  * highs and targets are managed as dynamic arrays during a
163  * table load.
164  */
165 static int alloc_targets(struct dm_table *t, unsigned int num)
166 {
167 	sector_t *n_highs;
168 	struct dm_target *n_targets;
169 	int n = t->num_targets;
170 
171 	/*
172 	 * Allocate both the target array and offset array at once.
173 	 * Append an empty entry to catch sectors beyond the end of
174 	 * the device.
175 	 */
176 	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
177 					  sizeof(sector_t));
178 	if (!n_highs)
179 		return -ENOMEM;
180 
181 	n_targets = (struct dm_target *) (n_highs + num);
182 
183 	if (n) {
184 		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
185 		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
186 	}
187 
188 	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
189 	vfree(t->highs);
190 
191 	t->num_allocated = num;
192 	t->highs = n_highs;
193 	t->targets = n_targets;
194 
195 	return 0;
196 }
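/*
 * The single dm_vcalloc() above therefore yields one contiguous, zeroed
 * region: num sector_t keys followed by room for num + 1 dm_target slots
 * (rough sketch):
 *
 *	n_highs[0 .. num-1]	initialised to (sector_t) -1 below
 *	n_targets[0 .. num]	target slots; the spare, all-zero entry is
 *				what dm_table_find_target() hands back for
 *				sectors beyond the end of the device
 */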
197 
198 int dm_table_create(struct dm_table **result, fmode_t mode,
199 		    unsigned num_targets, struct mapped_device *md)
200 {
201 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
202 
203 	if (!t)
204 		return -ENOMEM;
205 
206 	INIT_LIST_HEAD(&t->devices);
207 	atomic_set(&t->holders, 0);
208 	t->discards_supported = 1;
209 
210 	if (!num_targets)
211 		num_targets = KEYS_PER_NODE;
212 
213 	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
214 
215 	if (alloc_targets(t, num_targets)) {
216 		kfree(t);
217 		t = NULL;
218 		return -ENOMEM;
219 	}
220 
221 	t->mode = mode;
222 	t->md = md;
223 	*result = t;
224 	return 0;
225 }
226 
227 static void free_devices(struct list_head *devices)
228 {
229 	struct list_head *tmp, *next;
230 
231 	list_for_each_safe(tmp, next, devices) {
232 		struct dm_dev_internal *dd =
233 		    list_entry(tmp, struct dm_dev_internal, list);
234 		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
235 		       dd->dm_dev.name);
236 		kfree(dd);
237 	}
238 }
239 
240 void dm_table_destroy(struct dm_table *t)
241 {
242 	unsigned int i;
243 
244 	if (!t)
245 		return;
246 
247 	while (atomic_read(&t->holders))
248 		msleep(1);
249 	smp_mb();
250 
251 	/* free the indexes */
252 	if (t->depth >= 2)
253 		vfree(t->index[t->depth - 2]);
254 
255 	/* free the targets */
256 	for (i = 0; i < t->num_targets; i++) {
257 		struct dm_target *tgt = t->targets + i;
258 
259 		if (tgt->type->dtr)
260 			tgt->type->dtr(tgt);
261 
262 		dm_put_target_type(tgt->type);
263 	}
264 
265 	vfree(t->highs);
266 
267 	/* free the device list */
268 	if (t->devices.next != &t->devices)
269 		free_devices(&t->devices);
270 
271 	dm_free_md_mempools(t->mempools);
272 
273 	kfree(t);
274 }
275 
276 void dm_table_get(struct dm_table *t)
277 {
278 	atomic_inc(&t->holders);
279 }
280 
281 void dm_table_put(struct dm_table *t)
282 {
283 	if (!t)
284 		return;
285 
286 	smp_mb__before_atomic_dec();
287 	atomic_dec(&t->holders);
288 }
289 
290 /*
291  * Checks to see if we need to extend highs or targets.
292  */
293 static inline int check_space(struct dm_table *t)
294 {
295 	if (t->num_targets >= t->num_allocated)
296 		return alloc_targets(t, t->num_allocated * 2);
297 
298 	return 0;
299 }
300 
301 /*
302  * See if we've already got a device in the list.
303  */
304 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
305 {
306 	struct dm_dev_internal *dd;
307 
308 	list_for_each_entry (dd, l, list)
309 		if (dd->dm_dev.bdev->bd_dev == dev)
310 			return dd;
311 
312 	return NULL;
313 }
314 
315 /*
316  * Open a device so we can use it as a map destination.
317  */
318 static int open_dev(struct dm_dev_internal *d, dev_t dev,
319 		    struct mapped_device *md)
320 {
321 	static char *_claim_ptr = "I belong to device-mapper";
322 	struct block_device *bdev;
323 
324 	int r;
325 
326 	BUG_ON(d->dm_dev.bdev);
327 
328 	bdev = open_by_devnum(dev, d->dm_dev.mode);
329 	if (IS_ERR(bdev))
330 		return PTR_ERR(bdev);
331 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
332 	if (r)
333 		blkdev_put(bdev, d->dm_dev.mode);
334 	else
335 		d->dm_dev.bdev = bdev;
336 	return r;
337 }
338 
339 /*
340  * Close a device that we've been using.
341  */
342 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
343 {
344 	if (!d->dm_dev.bdev)
345 		return;
346 
347 	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
348 	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
349 	d->dm_dev.bdev = NULL;
350 }
351 
352 /*
353  * If possible, this checks whether an area of a destination device is invalid.
354  */
355 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
356 				  sector_t start, sector_t len, void *data)
357 {
358 	struct queue_limits *limits = data;
359 	struct block_device *bdev = dev->bdev;
360 	sector_t dev_size =
361 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
362 	unsigned short logical_block_size_sectors =
363 		limits->logical_block_size >> SECTOR_SHIFT;
364 	char b[BDEVNAME_SIZE];
365 
366 	if (!dev_size)
367 		return 0;
368 
369 	if ((start >= dev_size) || (start + len > dev_size)) {
370 		DMWARN("%s: %s too small for target: "
371 		       "start=%llu, len=%llu, dev_size=%llu",
372 		       dm_device_name(ti->table->md), bdevname(bdev, b),
373 		       (unsigned long long)start,
374 		       (unsigned long long)len,
375 		       (unsigned long long)dev_size);
376 		return 1;
377 	}
378 
379 	if (logical_block_size_sectors <= 1)
380 		return 0;
381 
382 	if (start & (logical_block_size_sectors - 1)) {
383 		DMWARN("%s: start=%llu not aligned to h/w "
384 		       "logical block size %u of %s",
385 		       dm_device_name(ti->table->md),
386 		       (unsigned long long)start,
387 		       limits->logical_block_size, bdevname(bdev, b));
388 		return 1;
389 	}
390 
391 	if (len & (logical_block_size_sectors - 1)) {
392 		DMWARN("%s: len=%llu not aligned to h/w "
393 		       "logical block size %u of %s",
394 		       dm_device_name(ti->table->md),
395 		       (unsigned long long)len,
396 		       limits->logical_block_size, bdevname(bdev, b));
397 		return 1;
398 	}
399 
400 	return 0;
401 }
402 
403 /*
404  * This upgrades the mode on an already open dm_dev, being
405  * careful to leave things as they were if we fail to reopen the
406  * device and not to touch the existing bdev field in case
407  * it is accessed concurrently inside dm_table_any_congested().
408  */
409 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
410 			struct mapped_device *md)
411 {
412 	int r;
413 	struct dm_dev_internal dd_new, dd_old;
414 
415 	dd_new = dd_old = *dd;
416 
417 	dd_new.dm_dev.mode |= new_mode;
418 	dd_new.dm_dev.bdev = NULL;
419 
420 	r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
421 	if (r)
422 		return r;
423 
424 	dd->dm_dev.mode |= new_mode;
425 	close_dev(&dd_old, md);
426 
427 	return 0;
428 }
429 
430 /*
431  * Add a device to the list, or just increment the usage count if
432  * it's already present.
433  */
434 static int __table_get_device(struct dm_table *t, struct dm_target *ti,
435 		      const char *path, fmode_t mode, struct dm_dev **result)
436 {
437 	int r;
438 	dev_t uninitialized_var(dev);
439 	struct dm_dev_internal *dd;
440 	unsigned int major, minor;
441 
442 	BUG_ON(!t);
443 
444 	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
445 		/* Extract the major/minor numbers */
446 		dev = MKDEV(major, minor);
447 		if (MAJOR(dev) != major || MINOR(dev) != minor)
448 			return -EOVERFLOW;
449 	} else {
450 		/* convert the path to a device */
451 		struct block_device *bdev = lookup_bdev(path);
452 
453 		if (IS_ERR(bdev))
454 			return PTR_ERR(bdev);
455 		dev = bdev->bd_dev;
456 		bdput(bdev);
457 	}
458 
459 	dd = find_device(&t->devices, dev);
460 	if (!dd) {
461 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
462 		if (!dd)
463 			return -ENOMEM;
464 
465 		dd->dm_dev.mode = mode;
466 		dd->dm_dev.bdev = NULL;
467 
468 		if ((r = open_dev(dd, dev, t->md))) {
469 			kfree(dd);
470 			return r;
471 		}
472 
473 		format_dev_t(dd->dm_dev.name, dev);
474 
475 		atomic_set(&dd->count, 0);
476 		list_add(&dd->list, &t->devices);
477 
478 	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
479 		r = upgrade_mode(dd, mode, t->md);
480 		if (r)
481 			return r;
482 	}
483 	atomic_inc(&dd->count);
484 
485 	*result = &dd->dm_dev;
486 	return 0;
487 }
488 
489 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
490 			 sector_t start, sector_t len, void *data)
491 {
492 	struct queue_limits *limits = data;
493 	struct block_device *bdev = dev->bdev;
494 	struct request_queue *q = bdev_get_queue(bdev);
495 	char b[BDEVNAME_SIZE];
496 
497 	if (unlikely(!q)) {
498 		DMWARN("%s: Cannot set limits for nonexistent device %s",
499 		       dm_device_name(ti->table->md), bdevname(bdev, b));
500 		return 0;
501 	}
502 
503 	if (bdev_stack_limits(limits, bdev, start) < 0)
504 		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
505 		       "physical_block_size=%u, logical_block_size=%u, "
506 		       "alignment_offset=%u, start=%llu",
507 		       dm_device_name(ti->table->md), bdevname(bdev, b),
508 		       q->limits.physical_block_size,
509 		       q->limits.logical_block_size,
510 		       q->limits.alignment_offset,
511 		       (unsigned long long) start << SECTOR_SHIFT);
512 
513 	/*
514 	 * Check if merge fn is supported.
515 	 * If not we'll force DM to use PAGE_SIZE or
516 	 * smaller I/O, just to be safe.
517 	 */
518 
519 	if (q->merge_bvec_fn && !ti->type->merge)
520 		limits->max_sectors =
521 			min_not_zero(limits->max_sectors,
522 				     (unsigned int) (PAGE_SIZE >> 9));
523 	return 0;
524 }
525 EXPORT_SYMBOL_GPL(dm_set_device_limits);
526 
527 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
528 		  struct dm_dev **result)
529 {
530 	return __table_get_device(ti->table, ti, path, mode, result);
531 }
532 
533 
534 /*
535  * Decrement a device's use count and remove it if necessary.
536  */
537 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
538 {
539 	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
540 						  dm_dev);
541 
542 	if (atomic_dec_and_test(&dd->count)) {
543 		close_dev(dd, ti->table->md);
544 		list_del(&dd->list);
545 		kfree(dd);
546 	}
547 }
548 
549 /*
550  * Checks to see if the target joins onto the end of the table.
551  */
552 static int adjoin(struct dm_table *table, struct dm_target *ti)
553 {
554 	struct dm_target *prev;
555 
556 	if (!table->num_targets)
557 		return !ti->begin;
558 
559 	prev = &table->targets[table->num_targets - 1];
560 	return (ti->begin == (prev->begin + prev->len));
561 }
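/*
 * For example, a two-line table in the usual "start length target args"
 * format:
 *
 *	0    1024  linear /dev/sda 0
 *	1024 2048  linear /dev/sdb 0
 *
 * adjoins cleanly, whereas a second line starting at 1025 would leave
 * sector 1024 unmapped and dm_table_add_target() would reject the table
 * with "Gap in table".
 */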
562 
563 /*
564  * Used to dynamically allocate the arg array.
565  */
566 static char **realloc_argv(unsigned *array_size, char **old_argv)
567 {
568 	char **argv;
569 	unsigned new_size;
570 
571 	new_size = *array_size ? *array_size * 2 : 64;
572 	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
573 	if (argv) {
574 		memcpy(argv, old_argv, *array_size * sizeof(*argv));
575 		*array_size = new_size;
576 	}
577 
578 	kfree(old_argv);
579 	return argv;
580 }
581 
582 /*
583  * Destructively splits up the argument list to pass to ctr.
584  */
585 int dm_split_args(int *argc, char ***argvp, char *input)
586 {
587 	char *start, *end = input, *out, **argv = NULL;
588 	unsigned array_size = 0;
589 
590 	*argc = 0;
591 
592 	if (!input) {
593 		*argvp = NULL;
594 		return 0;
595 	}
596 
597 	argv = realloc_argv(&array_size, argv);
598 	if (!argv)
599 		return -ENOMEM;
600 
601 	while (1) {
602 		/* Skip whitespace */
603 		start = skip_spaces(end);
604 
605 		if (!*start)
606 			break;	/* success, we hit the end */
607 
608 		/* 'out' is used to remove any backslash escapes */
609 		end = out = start;
610 		while (*end) {
611 			/* Everything apart from '\0' can be escaped */
612 			if (*end == '\\' && *(end + 1)) {
613 				*out++ = *(end + 1);
614 				end += 2;
615 				continue;
616 			}
617 
618 			if (isspace(*end))
619 				break;	/* end of token */
620 
621 			*out++ = *end++;
622 		}
623 
624 		/* have we already filled the array? */
625 		if ((*argc + 1) > array_size) {
626 			argv = realloc_argv(&array_size, argv);
627 			if (!argv)
628 				return -ENOMEM;
629 		}
630 
631 		/* we know this is whitespace */
632 		if (*end)
633 			end++;
634 
635 		/* terminate the string and put it in the array */
636 		*out = '\0';
637 		argv[*argc] = start;
638 		(*argc)++;
639 	}
640 
641 	*argvp = argv;
642 	return 0;
643 }
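/*
 * Illustrative example: splitting the writable parameter string
 *
 *	"/dev/disk/by-id/my\ disk 0"
 *
 * in place yields argc == 2 with argv[0] == "/dev/disk/by-id/my disk" and
 * argv[1] == "0"; the backslash escapes the space so it does not end the
 * first token.
 */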
644 
645 /*
646  * Impose necessary and sufficient conditions on a device's table such
647  * that any incoming bio which respects its logical_block_size can be
648  * processed successfully.  If it falls across the boundary between
649  * two or more targets, the size of each piece it gets split into must
650  * be compatible with the logical_block_size of the target processing it.
651  */
652 static int validate_hardware_logical_block_alignment(struct dm_table *table,
653 						 struct queue_limits *limits)
654 {
655 	/*
656 	 * This function uses arithmetic modulo the logical_block_size
657 	 * (in units of 512-byte sectors).
658 	 */
659 	unsigned short device_logical_block_size_sects =
660 		limits->logical_block_size >> SECTOR_SHIFT;
661 
662 	/*
663 	 * Offset of the start of the next table entry, mod logical_block_size.
664 	 */
665 	unsigned short next_target_start = 0;
666 
667 	/*
668 	 * Given an aligned bio that extends beyond the end of a
669 	 * target, how many sectors must the next target handle?
670 	 */
671 	unsigned short remaining = 0;
672 
673 	struct dm_target *uninitialized_var(ti);
674 	struct queue_limits ti_limits;
675 	unsigned i = 0;
676 
677 	/*
678 	 * Check each entry in the table in turn.
679 	 */
680 	while (i < dm_table_get_num_targets(table)) {
681 		ti = dm_table_get_target(table, i++);
682 
683 		blk_set_default_limits(&ti_limits);
684 
685 		/* combine all target devices' limits */
686 		if (ti->type->iterate_devices)
687 			ti->type->iterate_devices(ti, dm_set_device_limits,
688 						  &ti_limits);
689 
690 		/*
691 		 * If the remaining sectors fall entirely within this
692 		 * table entry, are they compatible with its logical_block_size?
693 		 */
694 		if (remaining < ti->len &&
695 		    remaining & ((ti_limits.logical_block_size >>
696 				  SECTOR_SHIFT) - 1))
697 			break;	/* Error */
698 
699 		next_target_start =
700 		    (unsigned short) ((next_target_start + ti->len) &
701 				      (device_logical_block_size_sects - 1));
702 		remaining = next_target_start ?
703 		    device_logical_block_size_sects - next_target_start : 0;
704 	}
705 
706 	if (remaining) {
707 		DMWARN("%s: table line %u (start sect %llu len %llu) "
708 		       "not aligned to h/w logical block size %u",
709 		       dm_device_name(table->md), i,
710 		       (unsigned long long) ti->begin,
711 		       (unsigned long long) ti->len,
712 		       limits->logical_block_size);
713 		return -EINVAL;
714 	}
715 
716 	return 0;
717 }
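/*
 * Worked example (illustrative): suppose every stacked device reports a
 * 4096-byte logical_block_size, i.e. 8 sectors, and the first target is
 * 1001 sectors long.  Then next_target_start == 1001 % 8 == 1 and
 * remaining == 7, so an aligned 8-sector bio straddling the boundary
 * would hand the second target a 7-sector fragment.  Since 7 is not a
 * multiple of 8, the check above breaks out of the loop and the table is
 * rejected with -EINVAL.
 */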
718 
719 int dm_table_add_target(struct dm_table *t, const char *type,
720 			sector_t start, sector_t len, char *params)
721 {
722 	int r = -EINVAL, argc;
723 	char **argv;
724 	struct dm_target *tgt;
725 
726 	if ((r = check_space(t)))
727 		return r;
728 
729 	tgt = t->targets + t->num_targets;
730 	memset(tgt, 0, sizeof(*tgt));
731 
732 	if (!len) {
733 		DMERR("%s: zero-length target", dm_device_name(t->md));
734 		return -EINVAL;
735 	}
736 
737 	tgt->type = dm_get_target_type(type);
738 	if (!tgt->type) {
739 		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
740 		      type);
741 		return -EINVAL;
742 	}
743 
744 	tgt->table = t;
745 	tgt->begin = start;
746 	tgt->len = len;
747 	tgt->error = "Unknown error";
748 
749 	/*
750 	 * Does this target adjoin the previous one?
751 	 */
752 	if (!adjoin(t, tgt)) {
753 		tgt->error = "Gap in table";
754 		r = -EINVAL;
755 		goto bad;
756 	}
757 
758 	r = dm_split_args(&argc, &argv, params);
759 	if (r) {
760 		tgt->error = "couldn't split parameters (insufficient memory)";
761 		goto bad;
762 	}
763 
764 	r = tgt->type->ctr(tgt, argc, argv);
765 	kfree(argv);
766 	if (r)
767 		goto bad;
768 
769 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
770 
771 	if (!tgt->num_discard_requests)
772 		t->discards_supported = 0;
773 
774 	return 0;
775 
776  bad:
777 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
778 	dm_put_target_type(tgt->type);
779 	return r;
780 }
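/*
 * As an illustration, loading the one-line table "0 204800 linear
 * /dev/sdb 0" ends up here roughly as
 *
 *	dm_table_add_target(t, "linear", 0, 204800, "/dev/sdb 0");
 *
 * with the "/dev/sdb 0" string handed to dm_split_args() and then to the
 * linear target's ctr.
 */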
781 
782 static int dm_table_set_type(struct dm_table *t)
783 {
784 	unsigned i;
785 	unsigned bio_based = 0, request_based = 0;
786 	struct dm_target *tgt;
787 	struct dm_dev_internal *dd;
788 	struct list_head *devices;
789 
790 	for (i = 0; i < t->num_targets; i++) {
791 		tgt = t->targets + i;
792 		if (dm_target_request_based(tgt))
793 			request_based = 1;
794 		else
795 			bio_based = 1;
796 
797 		if (bio_based && request_based) {
798 			DMWARN("Inconsistent table: different target types"
799 			       " can't be mixed up");
800 			return -EINVAL;
801 		}
802 	}
803 
804 	if (bio_based) {
805 		/* We must use this table as bio-based */
806 		t->type = DM_TYPE_BIO_BASED;
807 		return 0;
808 	}
809 
810 	BUG_ON(!request_based); /* No targets in this table */
811 
812 	/* Non-request-stackable devices can't be used for request-based dm */
813 	devices = dm_table_get_devices(t);
814 	list_for_each_entry(dd, devices, list) {
815 		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
816 			DMWARN("table load rejected: including"
817 			       " non-request-stackable devices");
818 			return -EINVAL;
819 		}
820 	}
821 
822 	/*
823 	 * Request-based dm currently supports only tables that have a single target.
824 	 * To support multiple targets, request splitting support is needed,
825 	 * and that needs lots of changes in the block-layer.
826 	 * (e.g. request completion process for partial completion.)
827 	 */
828 	if (t->num_targets > 1) {
829 		DMWARN("Request-based dm doesn't support multiple targets yet");
830 		return -EINVAL;
831 	}
832 
833 	t->type = DM_TYPE_REQUEST_BASED;
834 
835 	return 0;
836 }
837 
838 unsigned dm_table_get_type(struct dm_table *t)
839 {
840 	return t->type;
841 }
842 
843 bool dm_table_request_based(struct dm_table *t)
844 {
845 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
846 }
847 
848 int dm_table_alloc_md_mempools(struct dm_table *t)
849 {
850 	unsigned type = dm_table_get_type(t);
851 
852 	if (unlikely(type == DM_TYPE_NONE)) {
853 		DMWARN("no table type is set, can't allocate mempools");
854 		return -EINVAL;
855 	}
856 
857 	t->mempools = dm_alloc_md_mempools(type);
858 	if (!t->mempools)
859 		return -ENOMEM;
860 
861 	return 0;
862 }
863 
864 void dm_table_free_md_mempools(struct dm_table *t)
865 {
866 	dm_free_md_mempools(t->mempools);
867 	t->mempools = NULL;
868 }
869 
870 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
871 {
872 	return t->mempools;
873 }
874 
875 static int setup_indexes(struct dm_table *t)
876 {
877 	int i;
878 	unsigned int total = 0;
879 	sector_t *indexes;
880 
881 	/* allocate the space for *all* the indexes */
882 	for (i = t->depth - 2; i >= 0; i--) {
883 		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
884 		total += t->counts[i];
885 	}
886 
887 	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
888 	if (!indexes)
889 		return -ENOMEM;
890 
891 	/* set up internal nodes, bottom-up */
892 	for (i = t->depth - 2; i >= 0; i--) {
893 		t->index[i] = indexes;
894 		indexes += (KEYS_PER_NODE * t->counts[i]);
895 		setup_btree_index(i, t);
896 	}
897 
898 	return 0;
899 }
900 
901 /*
902  * Builds the btree to index the map.
903  */
904 static int dm_table_build_index(struct dm_table *t)
905 {
906 	int r = 0;
907 	unsigned int leaf_nodes;
908 
909 	/* how many indexes will the btree have? */
910 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
911 	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
912 
913 	/* leaf layer has already been set up */
914 	t->counts[t->depth - 1] = leaf_nodes;
915 	t->index[t->depth - 1] = t->highs;
916 
917 	if (t->depth >= 2)
918 		r = setup_indexes(t);
919 
920 	return r;
921 }
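/*
 * For instance (assuming KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9):
 * a table with 100 targets needs dm_div_up(100, 8) == 13 leaf nodes and
 * int_log(13, 9) == 2, giving a btree of depth 3 whose two upper levels
 * are allocated and filled by setup_indexes().
 */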
922 
923 /*
924  * Register the mapped device for blk_integrity support if
925  * the underlying devices support it.
926  */
927 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
928 {
929 	struct list_head *devices = dm_table_get_devices(t);
930 	struct dm_dev_internal *dd;
931 
932 	list_for_each_entry(dd, devices, list)
933 		if (bdev_get_integrity(dd->dm_dev.bdev))
934 			return blk_integrity_register(dm_disk(md), NULL);
935 
936 	return 0;
937 }
938 
939 /*
940  * Prepares the table for use by setting its type, building the indices,
941  * registering integrity support and allocating the mempools.
942  */
943 int dm_table_complete(struct dm_table *t)
944 {
945 	int r;
946 
947 	r = dm_table_set_type(t);
948 	if (r) {
949 		DMERR("unable to set table type");
950 		return r;
951 	}
952 
953 	r = dm_table_build_index(t);
954 	if (r) {
955 		DMERR("unable to build btrees");
956 		return r;
957 	}
958 
959 	r = dm_table_prealloc_integrity(t, t->md);
960 	if (r) {
961 		DMERR("could not register integrity profile.");
962 		return r;
963 	}
964 
965 	r = dm_table_alloc_md_mempools(t);
966 	if (r)
967 		DMERR("unable to allocate mempools");
968 
969 	return r;
970 }
971 
972 static DEFINE_MUTEX(_event_lock);
973 void dm_table_event_callback(struct dm_table *t,
974 			     void (*fn)(void *), void *context)
975 {
976 	mutex_lock(&_event_lock);
977 	t->event_fn = fn;
978 	t->event_context = context;
979 	mutex_unlock(&_event_lock);
980 }
981 
982 void dm_table_event(struct dm_table *t)
983 {
984 	/*
985 	 * You can no longer call dm_table_event() from interrupt
986 	 * context, use a bottom half instead.
987 	 */
988 	BUG_ON(in_interrupt());
989 
990 	mutex_lock(&_event_lock);
991 	if (t->event_fn)
992 		t->event_fn(t->event_context);
993 	mutex_unlock(&_event_lock);
994 }
995 
996 sector_t dm_table_get_size(struct dm_table *t)
997 {
998 	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
999 }
1000 
1001 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1002 {
1003 	if (index >= t->num_targets)
1004 		return NULL;
1005 
1006 	return t->targets + index;
1007 }
1008 
1009 /*
1010  * Search the btree for the correct target.
1011  *
1012  * Caller should check returned pointer with dm_target_is_valid()
1013  * to trap I/O beyond end of device.
1014  */
1015 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1016 {
1017 	unsigned int l, n = 0, k = 0;
1018 	sector_t *node;
1019 
1020 	for (l = 0; l < t->depth; l++) {
1021 		n = get_child(n, k);
1022 		node = get_node(t, l, n);
1023 
1024 		for (k = 0; k < KEYS_PER_NODE; k++)
1025 			if (node[k] >= sector)
1026 				break;
1027 	}
1028 
1029 	return &t->targets[(KEYS_PER_NODE * n) + k];
1030 }
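/*
 * For example, with two targets covering sectors 0-1023 and 1024-3071,
 * the leaf keys are { 1023, 3071, (sector_t) -1, ... } and
 * dm_table_find_target(t, 2000) returns the second target: 2000 is
 * greater than 1023 but not greater than 3071.
 */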
1031 
1032 /*
1033  * Establish the new table's queue_limits and validate them.
1034  */
1035 int dm_calculate_queue_limits(struct dm_table *table,
1036 			      struct queue_limits *limits)
1037 {
1038 	struct dm_target *uninitialized_var(ti);
1039 	struct queue_limits ti_limits;
1040 	unsigned i = 0;
1041 
1042 	blk_set_default_limits(limits);
1043 
1044 	while (i < dm_table_get_num_targets(table)) {
1045 		blk_set_default_limits(&ti_limits);
1046 
1047 		ti = dm_table_get_target(table, i++);
1048 
1049 		if (!ti->type->iterate_devices)
1050 			goto combine_limits;
1051 
1052 		/*
1053 		 * Combine queue limits of all the devices this target uses.
1054 		 */
1055 		ti->type->iterate_devices(ti, dm_set_device_limits,
1056 					  &ti_limits);
1057 
1058 		/* Set I/O hints portion of queue limits */
1059 		if (ti->type->io_hints)
1060 			ti->type->io_hints(ti, &ti_limits);
1061 
1062 		/*
1063 		 * Check each device area is consistent with the target's
1064 		 * overall queue limits.
1065 		 */
1066 		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1067 					      &ti_limits))
1068 			return -EINVAL;
1069 
1070 combine_limits:
1071 		/*
1072 		 * Merge this target's queue limits into the overall limits
1073 		 * for the table.
1074 		 */
1075 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1076 			DMWARN("%s: adding target device "
1077 			       "(start sect %llu len %llu) "
1078 			       "caused an alignment inconsistency",
1079 			       dm_device_name(table->md),
1080 			       (unsigned long long) ti->begin,
1081 			       (unsigned long long) ti->len);
1082 	}
1083 
1084 	return validate_hardware_logical_block_alignment(table, limits);
1085 }
1086 
1087 /*
1088  * Set the integrity profile for this device if all devices used have
1089  * matching profiles.
1090  */
1091 static void dm_table_set_integrity(struct dm_table *t)
1092 {
1093 	struct list_head *devices = dm_table_get_devices(t);
1094 	struct dm_dev_internal *prev = NULL, *dd = NULL;
1095 
1096 	if (!blk_get_integrity(dm_disk(t->md)))
1097 		return;
1098 
1099 	list_for_each_entry(dd, devices, list) {
1100 		if (prev &&
1101 		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
1102 					  dd->dm_dev.bdev->bd_disk) < 0) {
1103 			DMWARN("%s: integrity not set: %s and %s mismatch",
1104 			       dm_device_name(t->md),
1105 			       prev->dm_dev.bdev->bd_disk->disk_name,
1106 			       dd->dm_dev.bdev->bd_disk->disk_name);
1107 			goto no_integrity;
1108 		}
1109 		prev = dd;
1110 	}
1111 
1112 	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
1113 		goto no_integrity;
1114 
1115 	blk_integrity_register(dm_disk(t->md),
1116 			       bdev_get_integrity(prev->dm_dev.bdev));
1117 
1118 	return;
1119 
1120 no_integrity:
1121 	blk_integrity_register(dm_disk(t->md), NULL);
1122 
1123 	return;
1124 }
1125 
1126 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1127 			       struct queue_limits *limits)
1128 {
1129 	/*
1130 	 * Copy table's limits to the DM device's request_queue
1131 	 */
1132 	q->limits = *limits;
1133 
1134 	if (limits->no_cluster)
1135 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1136 	else
1137 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
1138 
1139 	if (!dm_table_supports_discards(t))
1140 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1141 	else
1142 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1143 
1144 	dm_table_set_integrity(t);
1145 
1146 	/*
1147 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1148 	 * visible to other CPUs because, once the flag is set, incoming bios
1149 	 * are processed by request-based dm, which refers to the queue
1150 	 * settings.
1151 	 * Until the flag is set, bios are passed to bio-based dm and queued to
1152 	 * md->deferred where queue settings are not needed yet.
1153 	 * Those bios are passed to request-based dm at the resume time.
1154 	 */
1155 	smp_mb();
1156 	if (dm_table_request_based(t))
1157 		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1158 }
1159 
1160 unsigned int dm_table_get_num_targets(struct dm_table *t)
1161 {
1162 	return t->num_targets;
1163 }
1164 
1165 struct list_head *dm_table_get_devices(struct dm_table *t)
1166 {
1167 	return &t->devices;
1168 }
1169 
1170 fmode_t dm_table_get_mode(struct dm_table *t)
1171 {
1172 	return t->mode;
1173 }
1174 
1175 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1176 {
1177 	int i = t->num_targets;
1178 	struct dm_target *ti = t->targets;
1179 
1180 	while (i--) {
1181 		if (postsuspend) {
1182 			if (ti->type->postsuspend)
1183 				ti->type->postsuspend(ti);
1184 		} else if (ti->type->presuspend)
1185 			ti->type->presuspend(ti);
1186 
1187 		ti++;
1188 	}
1189 }
1190 
1191 void dm_table_presuspend_targets(struct dm_table *t)
1192 {
1193 	if (!t)
1194 		return;
1195 
1196 	suspend_targets(t, 0);
1197 }
1198 
1199 void dm_table_postsuspend_targets(struct dm_table *t)
1200 {
1201 	if (!t)
1202 		return;
1203 
1204 	suspend_targets(t, 1);
1205 }
1206 
1207 int dm_table_resume_targets(struct dm_table *t)
1208 {
1209 	int i, r = 0;
1210 
1211 	for (i = 0; i < t->num_targets; i++) {
1212 		struct dm_target *ti = t->targets + i;
1213 
1214 		if (!ti->type->preresume)
1215 			continue;
1216 
1217 		r = ti->type->preresume(ti);
1218 		if (r)
1219 			return r;
1220 	}
1221 
1222 	for (i = 0; i < t->num_targets; i++) {
1223 		struct dm_target *ti = t->targets + i;
1224 
1225 		if (ti->type->resume)
1226 			ti->type->resume(ti);
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1233 {
1234 	struct dm_dev_internal *dd;
1235 	struct list_head *devices = dm_table_get_devices(t);
1236 	int r = 0;
1237 
1238 	list_for_each_entry(dd, devices, list) {
1239 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1240 		char b[BDEVNAME_SIZE];
1241 
1242 		if (likely(q))
1243 			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1244 		else
1245 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1246 				     dm_device_name(t->md),
1247 				     bdevname(dd->dm_dev.bdev, b));
1248 	}
1249 
1250 	return r;
1251 }
1252 
1253 int dm_table_any_busy_target(struct dm_table *t)
1254 {
1255 	unsigned i;
1256 	struct dm_target *ti;
1257 
1258 	for (i = 0; i < t->num_targets; i++) {
1259 		ti = t->targets + i;
1260 		if (ti->type->busy && ti->type->busy(ti))
1261 			return 1;
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 void dm_table_unplug_all(struct dm_table *t)
1268 {
1269 	struct dm_dev_internal *dd;
1270 	struct list_head *devices = dm_table_get_devices(t);
1271 
1272 	list_for_each_entry(dd, devices, list) {
1273 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1274 		char b[BDEVNAME_SIZE];
1275 
1276 		if (likely(q))
1277 			blk_unplug(q);
1278 		else
1279 			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
1280 				     dm_device_name(t->md),
1281 				     bdevname(dd->dm_dev.bdev, b));
1282 	}
1283 }
1284 
1285 struct mapped_device *dm_table_get_md(struct dm_table *t)
1286 {
1287 	return t->md;
1288 }
1289 
1290 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1291 				  sector_t start, sector_t len, void *data)
1292 {
1293 	struct request_queue *q = bdev_get_queue(dev->bdev);
1294 
1295 	return q && blk_queue_discard(q);
1296 }
1297 
1298 bool dm_table_supports_discards(struct dm_table *t)
1299 {
1300 	struct dm_target *ti;
1301 	unsigned i = 0;
1302 
1303 	if (!t->discards_supported)
1304 		return 0;
1305 
1306 	/*
1307 	 * Ensure that at least one underlying device supports discards.
1308 	 * t->devices includes internal dm devices such as mirror logs
1309 	 * so we need to use the iterate_devices callback here, which any
1310 	 * target supporting discard must provide.
1311 	 */
1312 	while (i < dm_table_get_num_targets(t)) {
1313 		ti = dm_table_get_target(t, i++);
1314 
1315 		if (ti->type->iterate_devices &&
1316 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
1317 			return 1;
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 EXPORT_SYMBOL(dm_vcalloc);
1324 EXPORT_SYMBOL(dm_get_device);
1325 EXPORT_SYMBOL(dm_put_device);
1326 EXPORT_SYMBOL(dm_table_event);
1327 EXPORT_SYMBOL(dm_table_get_size);
1328 EXPORT_SYMBOL(dm_table_get_mode);
1329 EXPORT_SYMBOL(dm_table_get_md);
1330 EXPORT_SYMBOL(dm_table_put);
1331 EXPORT_SYMBOL(dm_table_get);
1332 EXPORT_SYMBOL(dm_table_unplug_all);
1333