xref: /linux/drivers/md/dm-dust.c (revision cacddeab563be5850ec12ba6a1396e120f94a529)
1e4f3fabdSBryan Gurney // SPDX-License-Identifier: GPL-2.0
2e4f3fabdSBryan Gurney /*
3e4f3fabdSBryan Gurney  * Copyright (c) 2018 Red Hat, Inc.
4e4f3fabdSBryan Gurney  *
5e4f3fabdSBryan Gurney  * This is a test "dust" device, which fails reads on specified
6e4f3fabdSBryan Gurney  * sectors, emulating the behavior of a hard disk drive sending
7e4f3fabdSBryan Gurney  * a "Read Medium Error" sense.
8e4f3fabdSBryan Gurney  *
9e4f3fabdSBryan Gurney  */
10e4f3fabdSBryan Gurney 
11e4f3fabdSBryan Gurney #include <linux/device-mapper.h>
12e4f3fabdSBryan Gurney #include <linux/module.h>
13e4f3fabdSBryan Gurney #include <linux/rbtree.h>
14e4f3fabdSBryan Gurney 
15e4f3fabdSBryan Gurney #define DM_MSG_PREFIX "dust"
16e4f3fabdSBryan Gurney 
17e4f3fabdSBryan Gurney struct badblock {
18e4f3fabdSBryan Gurney 	struct rb_node node;
19e4f3fabdSBryan Gurney 	sector_t bb;
20e4f3fabdSBryan Gurney };
21e4f3fabdSBryan Gurney 
22e4f3fabdSBryan Gurney struct dust_device {
23e4f3fabdSBryan Gurney 	struct dm_dev *dev;
24e4f3fabdSBryan Gurney 	struct rb_root badblocklist;
25e4f3fabdSBryan Gurney 	unsigned long long badblock_count;
26e4f3fabdSBryan Gurney 	spinlock_t dust_lock;
27e4f3fabdSBryan Gurney 	unsigned int blksz;
28e4f3fabdSBryan Gurney 	unsigned int sect_per_block;
29e4f3fabdSBryan Gurney 	sector_t start;
30e4f3fabdSBryan Gurney 	bool fail_read_on_bb:1;
31e4f3fabdSBryan Gurney 	bool quiet_mode:1;
32e4f3fabdSBryan Gurney };
33e4f3fabdSBryan Gurney 
34e4f3fabdSBryan Gurney static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
35e4f3fabdSBryan Gurney {
36e4f3fabdSBryan Gurney 	struct rb_node *node = root->rb_node;
37e4f3fabdSBryan Gurney 
38e4f3fabdSBryan Gurney 	while (node) {
39e4f3fabdSBryan Gurney 		struct badblock *bblk = rb_entry(node, struct badblock, node);
40e4f3fabdSBryan Gurney 
41e4f3fabdSBryan Gurney 		if (bblk->bb > blk)
42e4f3fabdSBryan Gurney 			node = node->rb_left;
43e4f3fabdSBryan Gurney 		else if (bblk->bb < blk)
44e4f3fabdSBryan Gurney 			node = node->rb_right;
45e4f3fabdSBryan Gurney 		else
46e4f3fabdSBryan Gurney 			return bblk;
47e4f3fabdSBryan Gurney 	}
48e4f3fabdSBryan Gurney 
49e4f3fabdSBryan Gurney 	return NULL;
50e4f3fabdSBryan Gurney }
51e4f3fabdSBryan Gurney 
52e4f3fabdSBryan Gurney static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
53e4f3fabdSBryan Gurney {
54e4f3fabdSBryan Gurney 	struct badblock *bblk;
55e4f3fabdSBryan Gurney 	struct rb_node **link = &root->rb_node, *parent = NULL;
56e4f3fabdSBryan Gurney 	sector_t value = new->bb;
57e4f3fabdSBryan Gurney 
58e4f3fabdSBryan Gurney 	while (*link) {
59e4f3fabdSBryan Gurney 		parent = *link;
60e4f3fabdSBryan Gurney 		bblk = rb_entry(parent, struct badblock, node);
61e4f3fabdSBryan Gurney 
62e4f3fabdSBryan Gurney 		if (bblk->bb > value)
63e4f3fabdSBryan Gurney 			link = &(*link)->rb_left;
64e4f3fabdSBryan Gurney 		else if (bblk->bb < value)
65e4f3fabdSBryan Gurney 			link = &(*link)->rb_right;
66e4f3fabdSBryan Gurney 		else
67e4f3fabdSBryan Gurney 			return false;
68e4f3fabdSBryan Gurney 	}
69e4f3fabdSBryan Gurney 
70e4f3fabdSBryan Gurney 	rb_link_node(&new->node, parent, link);
71e4f3fabdSBryan Gurney 	rb_insert_color(&new->node, root);
72e4f3fabdSBryan Gurney 
73e4f3fabdSBryan Gurney 	return true;
74e4f3fabdSBryan Gurney }
75e4f3fabdSBryan Gurney 
76e4f3fabdSBryan Gurney static int dust_remove_block(struct dust_device *dd, unsigned long long block)
77e4f3fabdSBryan Gurney {
78e4f3fabdSBryan Gurney 	struct badblock *bblock;
79e4f3fabdSBryan Gurney 	unsigned long flags;
80e4f3fabdSBryan Gurney 
81e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
82e4f3fabdSBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
83e4f3fabdSBryan Gurney 
84e4f3fabdSBryan Gurney 	if (bblock == NULL) {
85e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
86e4f3fabdSBryan Gurney 			DMERR("%s: block %llu not found in badblocklist",
87e4f3fabdSBryan Gurney 			      __func__, block);
88e4f3fabdSBryan Gurney 		}
89e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
90e4f3fabdSBryan Gurney 		return -EINVAL;
91e4f3fabdSBryan Gurney 	}
92e4f3fabdSBryan Gurney 
93e4f3fabdSBryan Gurney 	rb_erase(&bblock->node, &dd->badblocklist);
94e4f3fabdSBryan Gurney 	dd->badblock_count--;
95e4f3fabdSBryan Gurney 	if (!dd->quiet_mode)
96e4f3fabdSBryan Gurney 		DMINFO("%s: badblock removed at block %llu", __func__, block);
97e4f3fabdSBryan Gurney 	kfree(bblock);
98e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
99e4f3fabdSBryan Gurney 
100e4f3fabdSBryan Gurney 	return 0;
101e4f3fabdSBryan Gurney }
102e4f3fabdSBryan Gurney 
103e4f3fabdSBryan Gurney static int dust_add_block(struct dust_device *dd, unsigned long long block)
104e4f3fabdSBryan Gurney {
105e4f3fabdSBryan Gurney 	struct badblock *bblock;
106e4f3fabdSBryan Gurney 	unsigned long flags;
107e4f3fabdSBryan Gurney 
108e4f3fabdSBryan Gurney 	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
109e4f3fabdSBryan Gurney 	if (bblock == NULL) {
110e4f3fabdSBryan Gurney 		if (!dd->quiet_mode)
111e4f3fabdSBryan Gurney 			DMERR("%s: badblock allocation failed", __func__);
112e4f3fabdSBryan Gurney 		return -ENOMEM;
113e4f3fabdSBryan Gurney 	}
114e4f3fabdSBryan Gurney 
115e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
116e4f3fabdSBryan Gurney 	bblock->bb = block * dd->sect_per_block;
117e4f3fabdSBryan Gurney 	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
119e4f3fabdSBryan Gurney 			DMERR("%s: block %llu already in badblocklist",
120e4f3fabdSBryan Gurney 			      __func__, block);
121e4f3fabdSBryan Gurney 		}
122e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
123e4f3fabdSBryan Gurney 		kfree(bblock);
124e4f3fabdSBryan Gurney 		return -EINVAL;
125e4f3fabdSBryan Gurney 	}
126e4f3fabdSBryan Gurney 
127e4f3fabdSBryan Gurney 	dd->badblock_count++;
128e4f3fabdSBryan Gurney 	if (!dd->quiet_mode)
129e4f3fabdSBryan Gurney 		DMINFO("%s: badblock added at block %llu", __func__, block);
130e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
131e4f3fabdSBryan Gurney 
132e4f3fabdSBryan Gurney 	return 0;
133e4f3fabdSBryan Gurney }
134e4f3fabdSBryan Gurney 
135e4f3fabdSBryan Gurney static int dust_query_block(struct dust_device *dd, unsigned long long block)
136e4f3fabdSBryan Gurney {
137e4f3fabdSBryan Gurney 	struct badblock *bblock;
138e4f3fabdSBryan Gurney 	unsigned long flags;
139e4f3fabdSBryan Gurney 
140e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
141e4f3fabdSBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
142e4f3fabdSBryan Gurney 	if (bblock != NULL)
143e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu found in badblocklist", __func__, block);
144e4f3fabdSBryan Gurney 	else
145e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
146e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
147e4f3fabdSBryan Gurney 
148e4f3fabdSBryan Gurney 	return 0;
149e4f3fabdSBryan Gurney }
150e4f3fabdSBryan Gurney 
151e4f3fabdSBryan Gurney static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
152e4f3fabdSBryan Gurney {
153e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
154e4f3fabdSBryan Gurney 
155e4f3fabdSBryan Gurney 	if (bblk)
156e4f3fabdSBryan Gurney 		return DM_MAPIO_KILL;
157e4f3fabdSBryan Gurney 
158e4f3fabdSBryan Gurney 	return DM_MAPIO_REMAPPED;
159e4f3fabdSBryan Gurney }
160e4f3fabdSBryan Gurney 
161e4f3fabdSBryan Gurney static int dust_map_read(struct dust_device *dd, sector_t thisblock,
162e4f3fabdSBryan Gurney 			 bool fail_read_on_bb)
163e4f3fabdSBryan Gurney {
164e4f3fabdSBryan Gurney 	unsigned long flags;
165e4f3fabdSBryan Gurney 	int ret = DM_MAPIO_REMAPPED;
166e4f3fabdSBryan Gurney 
167e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
168e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
169e4f3fabdSBryan Gurney 		ret = __dust_map_read(dd, thisblock);
170e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
171e4f3fabdSBryan Gurney 	}
172e4f3fabdSBryan Gurney 
173e4f3fabdSBryan Gurney 	return ret;
174e4f3fabdSBryan Gurney }
175e4f3fabdSBryan Gurney 
176e4f3fabdSBryan Gurney static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
177e4f3fabdSBryan Gurney {
178e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
179e4f3fabdSBryan Gurney 
180e4f3fabdSBryan Gurney 	if (bblk) {
181e4f3fabdSBryan Gurney 		rb_erase(&bblk->node, &dd->badblocklist);
182e4f3fabdSBryan Gurney 		dd->badblock_count--;
183e4f3fabdSBryan Gurney 		kfree(bblk);
184e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
185e4f3fabdSBryan Gurney 			sector_div(thisblock, dd->sect_per_block);
186e4f3fabdSBryan Gurney 			DMINFO("block %llu removed from badblocklist by write",
187e4f3fabdSBryan Gurney 			       (unsigned long long)thisblock);
188e4f3fabdSBryan Gurney 		}
189e4f3fabdSBryan Gurney 	}
190e4f3fabdSBryan Gurney }
191e4f3fabdSBryan Gurney 
192e4f3fabdSBryan Gurney static int dust_map_write(struct dust_device *dd, sector_t thisblock,
193e4f3fabdSBryan Gurney 			  bool fail_read_on_bb)
194e4f3fabdSBryan Gurney {
195e4f3fabdSBryan Gurney 	unsigned long flags;
196e4f3fabdSBryan Gurney 
197e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
198e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
199e4f3fabdSBryan Gurney 		__dust_map_write(dd, thisblock);
200e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
201e4f3fabdSBryan Gurney 	}
202e4f3fabdSBryan Gurney 
203e4f3fabdSBryan Gurney 	return DM_MAPIO_REMAPPED;
204e4f3fabdSBryan Gurney }
205e4f3fabdSBryan Gurney 
206e4f3fabdSBryan Gurney static int dust_map(struct dm_target *ti, struct bio *bio)
207e4f3fabdSBryan Gurney {
208e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
209e4f3fabdSBryan Gurney 	int ret;
210e4f3fabdSBryan Gurney 
211e4f3fabdSBryan Gurney 	bio_set_dev(bio, dd->dev->bdev);
212e4f3fabdSBryan Gurney 	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
213e4f3fabdSBryan Gurney 
214e4f3fabdSBryan Gurney 	if (bio_data_dir(bio) == READ)
215e4f3fabdSBryan Gurney 		ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
216e4f3fabdSBryan Gurney 	else
217e4f3fabdSBryan Gurney 		ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
218e4f3fabdSBryan Gurney 
219e4f3fabdSBryan Gurney 	return ret;
220e4f3fabdSBryan Gurney }
221e4f3fabdSBryan Gurney 
222e4f3fabdSBryan Gurney static bool __dust_clear_badblocks(struct rb_root *tree,
223e4f3fabdSBryan Gurney 				   unsigned long long count)
224e4f3fabdSBryan Gurney {
225e4f3fabdSBryan Gurney 	struct rb_node *node = NULL, *nnode = NULL;
226e4f3fabdSBryan Gurney 
227e4f3fabdSBryan Gurney 	nnode = rb_first(tree);
228e4f3fabdSBryan Gurney 	if (nnode == NULL) {
229e4f3fabdSBryan Gurney 		BUG_ON(count != 0);
230e4f3fabdSBryan Gurney 		return false;
231e4f3fabdSBryan Gurney 	}
232e4f3fabdSBryan Gurney 
233e4f3fabdSBryan Gurney 	while (nnode) {
234e4f3fabdSBryan Gurney 		node = nnode;
235e4f3fabdSBryan Gurney 		nnode = rb_next(node);
236e4f3fabdSBryan Gurney 		rb_erase(node, tree);
237e4f3fabdSBryan Gurney 		count--;
238e4f3fabdSBryan Gurney 		kfree(node);
239e4f3fabdSBryan Gurney 	}
240e4f3fabdSBryan Gurney 	BUG_ON(count != 0);
241e4f3fabdSBryan Gurney 	BUG_ON(tree->rb_node != NULL);
242e4f3fabdSBryan Gurney 
243e4f3fabdSBryan Gurney 	return true;
244e4f3fabdSBryan Gurney }
245e4f3fabdSBryan Gurney 
246e4f3fabdSBryan Gurney static int dust_clear_badblocks(struct dust_device *dd)
247e4f3fabdSBryan Gurney {
248e4f3fabdSBryan Gurney 	unsigned long flags;
249e4f3fabdSBryan Gurney 	struct rb_root badblocklist;
250e4f3fabdSBryan Gurney 	unsigned long long badblock_count;
251e4f3fabdSBryan Gurney 
252e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
253e4f3fabdSBryan Gurney 	badblocklist = dd->badblocklist;
254e4f3fabdSBryan Gurney 	badblock_count = dd->badblock_count;
255e4f3fabdSBryan Gurney 	dd->badblocklist = RB_ROOT;
256e4f3fabdSBryan Gurney 	dd->badblock_count = 0;
257e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
258e4f3fabdSBryan Gurney 
259e4f3fabdSBryan Gurney 	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
260e4f3fabdSBryan Gurney 		DMINFO("%s: no badblocks found", __func__);
261e4f3fabdSBryan Gurney 	else
262e4f3fabdSBryan Gurney 		DMINFO("%s: badblocks cleared", __func__);
263e4f3fabdSBryan Gurney 
264e4f3fabdSBryan Gurney 	return 0;
265e4f3fabdSBryan Gurney }
266e4f3fabdSBryan Gurney 
267e4f3fabdSBryan Gurney /*
268e4f3fabdSBryan Gurney  * Target parameters:
269e4f3fabdSBryan Gurney  *
270e4f3fabdSBryan Gurney  * <device_path> <offset> <blksz>
271e4f3fabdSBryan Gurney  *
272e4f3fabdSBryan Gurney  * device_path: path to the block device
273e4f3fabdSBryan Gurney  * offset: offset to data area from start of device_path
274e4f3fabdSBryan Gurney  * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
275e4f3fabdSBryan Gurney  */
276e4f3fabdSBryan Gurney static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
277e4f3fabdSBryan Gurney {
278e4f3fabdSBryan Gurney 	struct dust_device *dd;
279e4f3fabdSBryan Gurney 	unsigned long long tmp;
280e4f3fabdSBryan Gurney 	char dummy;
281e4f3fabdSBryan Gurney 	unsigned int blksz;
282e4f3fabdSBryan Gurney 	unsigned int sect_per_block;
283e4f3fabdSBryan Gurney 	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
284e4f3fabdSBryan Gurney 	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);
285e4f3fabdSBryan Gurney 
286e4f3fabdSBryan Gurney 	if (argc != 3) {
287e4f3fabdSBryan Gurney 		ti->error = "Invalid argument count";
288e4f3fabdSBryan Gurney 		return -EINVAL;
289e4f3fabdSBryan Gurney 	}
290e4f3fabdSBryan Gurney 
291e4f3fabdSBryan Gurney 	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
292e4f3fabdSBryan Gurney 		ti->error = "Invalid block size parameter";
293e4f3fabdSBryan Gurney 		return -EINVAL;
294e4f3fabdSBryan Gurney 	}
295e4f3fabdSBryan Gurney 
296e4f3fabdSBryan Gurney 	if (blksz < 512) {
297e4f3fabdSBryan Gurney 		ti->error = "Block size must be at least 512";
298e4f3fabdSBryan Gurney 		return -EINVAL;
299e4f3fabdSBryan Gurney 	}
300e4f3fabdSBryan Gurney 
301e4f3fabdSBryan Gurney 	if (!is_power_of_2(blksz)) {
302e4f3fabdSBryan Gurney 		ti->error = "Block size must be a power of 2";
303e4f3fabdSBryan Gurney 		return -EINVAL;
304e4f3fabdSBryan Gurney 	}
305e4f3fabdSBryan Gurney 
306e4f3fabdSBryan Gurney 	if (to_sector(blksz) > max_block_sectors) {
307e4f3fabdSBryan Gurney 		ti->error = "Block size is too large";
308e4f3fabdSBryan Gurney 		return -EINVAL;
309e4f3fabdSBryan Gurney 	}
310e4f3fabdSBryan Gurney 
311e4f3fabdSBryan Gurney 	sect_per_block = (blksz >> SECTOR_SHIFT);
312e4f3fabdSBryan Gurney 
313e4f3fabdSBryan Gurney 	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
314e4f3fabdSBryan Gurney 		ti->error = "Invalid device offset sector";
315e4f3fabdSBryan Gurney 		return -EINVAL;
316e4f3fabdSBryan Gurney 	}
317e4f3fabdSBryan Gurney 
318e4f3fabdSBryan Gurney 	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
319e4f3fabdSBryan Gurney 	if (dd == NULL) {
320e4f3fabdSBryan Gurney 		ti->error = "Cannot allocate context";
321e4f3fabdSBryan Gurney 		return -ENOMEM;
322e4f3fabdSBryan Gurney 	}
323e4f3fabdSBryan Gurney 
324e4f3fabdSBryan Gurney 	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
325e4f3fabdSBryan Gurney 		ti->error = "Device lookup failed";
326e4f3fabdSBryan Gurney 		kfree(dd);
327e4f3fabdSBryan Gurney 		return -EINVAL;
328e4f3fabdSBryan Gurney 	}
329e4f3fabdSBryan Gurney 
330e4f3fabdSBryan Gurney 	dd->sect_per_block = sect_per_block;
331e4f3fabdSBryan Gurney 	dd->blksz = blksz;
332e4f3fabdSBryan Gurney 	dd->start = tmp;
333e4f3fabdSBryan Gurney 
334e4f3fabdSBryan Gurney 	/*
335e4f3fabdSBryan Gurney 	 * Whether to fail a read on a "bad" block.
336e4f3fabdSBryan Gurney 	 * Defaults to false; enabled later by message.
337e4f3fabdSBryan Gurney 	 */
338e4f3fabdSBryan Gurney 	dd->fail_read_on_bb = false;
339e4f3fabdSBryan Gurney 
340e4f3fabdSBryan Gurney 	/*
341e4f3fabdSBryan Gurney 	 * Initialize bad block list rbtree.
342e4f3fabdSBryan Gurney 	 */
343e4f3fabdSBryan Gurney 	dd->badblocklist = RB_ROOT;
344e4f3fabdSBryan Gurney 	dd->badblock_count = 0;
345e4f3fabdSBryan Gurney 	spin_lock_init(&dd->dust_lock);
346e4f3fabdSBryan Gurney 
347e4f3fabdSBryan Gurney 	dd->quiet_mode = false;
348e4f3fabdSBryan Gurney 
349e4f3fabdSBryan Gurney 	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
350e4f3fabdSBryan Gurney 
351e4f3fabdSBryan Gurney 	ti->num_discard_bios = 1;
352e4f3fabdSBryan Gurney 	ti->num_flush_bios = 1;
353e4f3fabdSBryan Gurney 	ti->private = dd;
354e4f3fabdSBryan Gurney 
355e4f3fabdSBryan Gurney 	return 0;
356e4f3fabdSBryan Gurney }
357e4f3fabdSBryan Gurney 
/*
 * Destructor: free the bad block tree, release the underlying device,
 * then free the per-target context.  dust_lock is not taken here —
 * presumably no I/O or messages can race with target destruction.
 */
static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}
366e4f3fabdSBryan Gurney 
/*
 * Handle a runtime message sent to the target.
 *
 * One-argument messages: "enable"/"disable" toggle read failures,
 * "countbadblocks" logs the entry count, "clearbadblocks" empties the
 * list, "quiet" toggles verbosity.
 * Two-argument messages: "addbadblock"/"removebadblock"/"queryblock"
 * take a block number.
 *
 * Returns 0 on success, -EINVAL on any parse error or unknown message.
 */
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result_buf, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	/* Device size in 512-byte sectors; divided down to blocks below. */
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int result = -EINVAL;
	unsigned long long tmp, block;
	unsigned long flags;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			/*
			 * NOTE(review): fail_read_on_bb is written here without
			 * dust_lock, and dust_map() reads it unlocked — confirm
			 * a torn/stale read of this bool is acceptable.
			 */
			dd->fail_read_on_bb = false;
			result = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			result = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMINFO("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			result = 0;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			result = dust_clear_badblocks(dd);
		} else if (!strcasecmp(argv[0], "quiet")) {
			/* Toggle: suppress/restore informational log output. */
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			result = 0;
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return result;

		block = tmp;
		/* sector_div() converts "size" from sectors to blocks in place. */
		sector_div(size, dd->sect_per_block);
		/*
		 * NOTE(review): with blocks numbered from 0, block == size
		 * looks one past the end; check is ">" not ">=" — confirm
		 * whether this off-by-one is intentional.
		 */
		if (block > size) {
			DMERR("selected block value out of range");
			return result;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			result = dust_add_block(dd, block);
		else if (!strcasecmp(argv[0], "removebadblock"))
			result = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			result = dust_query_block(dd, block);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return result;
}
436e4f3fabdSBryan Gurney 
437e4f3fabdSBryan Gurney static void dust_status(struct dm_target *ti, status_type_t type,
438e4f3fabdSBryan Gurney 			unsigned int status_flags, char *result, unsigned int maxlen)
439e4f3fabdSBryan Gurney {
440e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
441e4f3fabdSBryan Gurney 	unsigned int sz = 0;
442e4f3fabdSBryan Gurney 
443e4f3fabdSBryan Gurney 	switch (type) {
444e4f3fabdSBryan Gurney 	case STATUSTYPE_INFO:
445e4f3fabdSBryan Gurney 		DMEMIT("%s %s %s", dd->dev->name,
446e4f3fabdSBryan Gurney 		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
447e4f3fabdSBryan Gurney 		       dd->quiet_mode ? "quiet" : "verbose");
448e4f3fabdSBryan Gurney 		break;
449e4f3fabdSBryan Gurney 
450e4f3fabdSBryan Gurney 	case STATUSTYPE_TABLE:
451e4f3fabdSBryan Gurney 		DMEMIT("%s %llu %u", dd->dev->name,
452e4f3fabdSBryan Gurney 		       (unsigned long long)dd->start, dd->blksz);
453e4f3fabdSBryan Gurney 		break;
454e4f3fabdSBryan Gurney 	}
455e4f3fabdSBryan Gurney }
456e4f3fabdSBryan Gurney 
457e4f3fabdSBryan Gurney static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
458e4f3fabdSBryan Gurney {
459e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
460e4f3fabdSBryan Gurney 	struct dm_dev *dev = dd->dev;
461e4f3fabdSBryan Gurney 
462e4f3fabdSBryan Gurney 	*bdev = dev->bdev;
463e4f3fabdSBryan Gurney 
464e4f3fabdSBryan Gurney 	/*
465e4f3fabdSBryan Gurney 	 * Only pass ioctls through if the device sizes match exactly.
466e4f3fabdSBryan Gurney 	 */
467e4f3fabdSBryan Gurney 	if (dd->start ||
468e4f3fabdSBryan Gurney 	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
469e4f3fabdSBryan Gurney 		return 1;
470e4f3fabdSBryan Gurney 
471e4f3fabdSBryan Gurney 	return 0;
472e4f3fabdSBryan Gurney }
473e4f3fabdSBryan Gurney 
/*
 * Report the single underlying device (and the region of it this
 * target maps) to the device-mapper core via the supplied callout.
 */
static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}
481e4f3fabdSBryan Gurney 
/*
 * Registration table wiring the "dust" target name to its constructor,
 * destructor, I/O mapping, message, status, and ioctl handlers.
 */
static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};
494e4f3fabdSBryan Gurney 
495e4f3fabdSBryan Gurney int __init dm_dust_init(void)
496e4f3fabdSBryan Gurney {
497e4f3fabdSBryan Gurney 	int result = dm_register_target(&dust_target);
498e4f3fabdSBryan Gurney 
499e4f3fabdSBryan Gurney 	if (result < 0)
500e4f3fabdSBryan Gurney 		DMERR("dm_register_target failed %d", result);
501e4f3fabdSBryan Gurney 
502e4f3fabdSBryan Gurney 	return result;
503e4f3fabdSBryan Gurney }
504e4f3fabdSBryan Gurney 
505e4f3fabdSBryan Gurney void __exit dm_dust_exit(void)
506e4f3fabdSBryan Gurney {
507e4f3fabdSBryan Gurney 	dm_unregister_target(&dust_target);
508e4f3fabdSBryan Gurney }
509e4f3fabdSBryan Gurney 
/* Register the target on module load; remove it on unload. */
module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
516