xref: /linux/drivers/md/dm-dust.c (revision cc7a7fb3b689996d35404080dde09de03fc1d09b)
1e4f3fabdSBryan Gurney // SPDX-License-Identifier: GPL-2.0
2e4f3fabdSBryan Gurney /*
3e4f3fabdSBryan Gurney  * Copyright (c) 2018 Red Hat, Inc.
4e4f3fabdSBryan Gurney  *
5e4f3fabdSBryan Gurney  * This is a test "dust" device, which fails reads on specified
6e4f3fabdSBryan Gurney  * sectors, emulating the behavior of a hard disk drive sending
7e4f3fabdSBryan Gurney  * a "Read Medium Error" sense.
8e4f3fabdSBryan Gurney  *
9e4f3fabdSBryan Gurney  */
10e4f3fabdSBryan Gurney 
11e4f3fabdSBryan Gurney #include <linux/device-mapper.h>
12e4f3fabdSBryan Gurney #include <linux/module.h>
13e4f3fabdSBryan Gurney #include <linux/rbtree.h>
14e4f3fabdSBryan Gurney 
#define DM_MSG_PREFIX "dust"	/* prefix used by DMERR/DMINFO log macros */
16e4f3fabdSBryan Gurney 
/*
 * One simulated bad block, kept in the per-device rbtree and keyed by
 * block number (device sector divided by sectors-per-block).
 */
struct badblock {
	struct rb_node node;	/* linkage in dust_device::badblocklist */
	sector_t bb;		/* bad block number (tree key) */
};
21e4f3fabdSBryan Gurney 
/* Per-target state for one "dust" device instance. */
struct dust_device {
	struct dm_dev *dev;			/* underlying block device */
	struct rb_root badblocklist;		/* rbtree of simulated bad blocks */
	unsigned long long badblock_count;	/* number of entries in badblocklist */
	spinlock_t dust_lock;			/* protects badblocklist and badblock_count */
	unsigned int blksz;			/* block size in bytes (power of 2, >= 512) */
	int sect_per_block_shift;		/* log2 of sect_per_block */
	unsigned int sect_per_block;		/* 512-byte sectors per block */
	sector_t start;				/* sector offset into the underlying device */
	bool fail_read_on_bb:1;			/* fail reads that hit a bad block? */
	bool quiet_mode:1;			/* suppress informational log messages */
};
34e4f3fabdSBryan Gurney 
35e4f3fabdSBryan Gurney static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
36e4f3fabdSBryan Gurney {
37e4f3fabdSBryan Gurney 	struct rb_node *node = root->rb_node;
38e4f3fabdSBryan Gurney 
39e4f3fabdSBryan Gurney 	while (node) {
40e4f3fabdSBryan Gurney 		struct badblock *bblk = rb_entry(node, struct badblock, node);
41e4f3fabdSBryan Gurney 
42e4f3fabdSBryan Gurney 		if (bblk->bb > blk)
43e4f3fabdSBryan Gurney 			node = node->rb_left;
44e4f3fabdSBryan Gurney 		else if (bblk->bb < blk)
45e4f3fabdSBryan Gurney 			node = node->rb_right;
46e4f3fabdSBryan Gurney 		else
47e4f3fabdSBryan Gurney 			return bblk;
48e4f3fabdSBryan Gurney 	}
49e4f3fabdSBryan Gurney 
50e4f3fabdSBryan Gurney 	return NULL;
51e4f3fabdSBryan Gurney }
52e4f3fabdSBryan Gurney 
53e4f3fabdSBryan Gurney static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
54e4f3fabdSBryan Gurney {
55e4f3fabdSBryan Gurney 	struct badblock *bblk;
56e4f3fabdSBryan Gurney 	struct rb_node **link = &root->rb_node, *parent = NULL;
57e4f3fabdSBryan Gurney 	sector_t value = new->bb;
58e4f3fabdSBryan Gurney 
59e4f3fabdSBryan Gurney 	while (*link) {
60e4f3fabdSBryan Gurney 		parent = *link;
61e4f3fabdSBryan Gurney 		bblk = rb_entry(parent, struct badblock, node);
62e4f3fabdSBryan Gurney 
63e4f3fabdSBryan Gurney 		if (bblk->bb > value)
64e4f3fabdSBryan Gurney 			link = &(*link)->rb_left;
65e4f3fabdSBryan Gurney 		else if (bblk->bb < value)
66e4f3fabdSBryan Gurney 			link = &(*link)->rb_right;
67e4f3fabdSBryan Gurney 		else
68e4f3fabdSBryan Gurney 			return false;
69e4f3fabdSBryan Gurney 	}
70e4f3fabdSBryan Gurney 
71e4f3fabdSBryan Gurney 	rb_link_node(&new->node, parent, link);
72e4f3fabdSBryan Gurney 	rb_insert_color(&new->node, root);
73e4f3fabdSBryan Gurney 
74e4f3fabdSBryan Gurney 	return true;
75e4f3fabdSBryan Gurney }
76e4f3fabdSBryan Gurney 
77e4f3fabdSBryan Gurney static int dust_remove_block(struct dust_device *dd, unsigned long long block)
78e4f3fabdSBryan Gurney {
79e4f3fabdSBryan Gurney 	struct badblock *bblock;
80e4f3fabdSBryan Gurney 	unsigned long flags;
81e4f3fabdSBryan Gurney 
82e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
8308c04c84SBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block);
84e4f3fabdSBryan Gurney 
85e4f3fabdSBryan Gurney 	if (bblock == NULL) {
86e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
87e4f3fabdSBryan Gurney 			DMERR("%s: block %llu not found in badblocklist",
88e4f3fabdSBryan Gurney 			      __func__, block);
89e4f3fabdSBryan Gurney 		}
90e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
91e4f3fabdSBryan Gurney 		return -EINVAL;
92e4f3fabdSBryan Gurney 	}
93e4f3fabdSBryan Gurney 
94e4f3fabdSBryan Gurney 	rb_erase(&bblock->node, &dd->badblocklist);
95e4f3fabdSBryan Gurney 	dd->badblock_count--;
96e4f3fabdSBryan Gurney 	if (!dd->quiet_mode)
97e4f3fabdSBryan Gurney 		DMINFO("%s: badblock removed at block %llu", __func__, block);
98e4f3fabdSBryan Gurney 	kfree(bblock);
99e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
100e4f3fabdSBryan Gurney 
101e4f3fabdSBryan Gurney 	return 0;
102e4f3fabdSBryan Gurney }
103e4f3fabdSBryan Gurney 
104e4f3fabdSBryan Gurney static int dust_add_block(struct dust_device *dd, unsigned long long block)
105e4f3fabdSBryan Gurney {
106e4f3fabdSBryan Gurney 	struct badblock *bblock;
107e4f3fabdSBryan Gurney 	unsigned long flags;
108e4f3fabdSBryan Gurney 
109e4f3fabdSBryan Gurney 	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
110e4f3fabdSBryan Gurney 	if (bblock == NULL) {
111e4f3fabdSBryan Gurney 		if (!dd->quiet_mode)
112e4f3fabdSBryan Gurney 			DMERR("%s: badblock allocation failed", __func__);
113e4f3fabdSBryan Gurney 		return -ENOMEM;
114e4f3fabdSBryan Gurney 	}
115e4f3fabdSBryan Gurney 
116e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
11708c04c84SBryan Gurney 	bblock->bb = block;
118e4f3fabdSBryan Gurney 	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
119e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
120e4f3fabdSBryan Gurney 			DMERR("%s: block %llu already in badblocklist",
121e4f3fabdSBryan Gurney 			      __func__, block);
122e4f3fabdSBryan Gurney 		}
123e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
124e4f3fabdSBryan Gurney 		kfree(bblock);
125e4f3fabdSBryan Gurney 		return -EINVAL;
126e4f3fabdSBryan Gurney 	}
127e4f3fabdSBryan Gurney 
128e4f3fabdSBryan Gurney 	dd->badblock_count++;
129e4f3fabdSBryan Gurney 	if (!dd->quiet_mode)
130e4f3fabdSBryan Gurney 		DMINFO("%s: badblock added at block %llu", __func__, block);
131e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
132e4f3fabdSBryan Gurney 
133e4f3fabdSBryan Gurney 	return 0;
134e4f3fabdSBryan Gurney }
135e4f3fabdSBryan Gurney 
136e4f3fabdSBryan Gurney static int dust_query_block(struct dust_device *dd, unsigned long long block)
137e4f3fabdSBryan Gurney {
138e4f3fabdSBryan Gurney 	struct badblock *bblock;
139e4f3fabdSBryan Gurney 	unsigned long flags;
140e4f3fabdSBryan Gurney 
141e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
14208c04c84SBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block);
143e4f3fabdSBryan Gurney 	if (bblock != NULL)
144e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu found in badblocklist", __func__, block);
145e4f3fabdSBryan Gurney 	else
146e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
147e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
148e4f3fabdSBryan Gurney 
149e4f3fabdSBryan Gurney 	return 0;
150e4f3fabdSBryan Gurney }
151e4f3fabdSBryan Gurney 
152e4f3fabdSBryan Gurney static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
153e4f3fabdSBryan Gurney {
154e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
155e4f3fabdSBryan Gurney 
156e4f3fabdSBryan Gurney 	if (bblk)
157e4f3fabdSBryan Gurney 		return DM_MAPIO_KILL;
158e4f3fabdSBryan Gurney 
159e4f3fabdSBryan Gurney 	return DM_MAPIO_REMAPPED;
160e4f3fabdSBryan Gurney }
161e4f3fabdSBryan Gurney 
162e4f3fabdSBryan Gurney static int dust_map_read(struct dust_device *dd, sector_t thisblock,
163e4f3fabdSBryan Gurney 			 bool fail_read_on_bb)
164e4f3fabdSBryan Gurney {
165e4f3fabdSBryan Gurney 	unsigned long flags;
166*cc7a7fb3SBryan Gurney 	int r = DM_MAPIO_REMAPPED;
167e4f3fabdSBryan Gurney 
168e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
16908c04c84SBryan Gurney 		thisblock >>= dd->sect_per_block_shift;
170e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
171*cc7a7fb3SBryan Gurney 		r = __dust_map_read(dd, thisblock);
172e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
173e4f3fabdSBryan Gurney 	}
174e4f3fabdSBryan Gurney 
175*cc7a7fb3SBryan Gurney 	return r;
176e4f3fabdSBryan Gurney }
177e4f3fabdSBryan Gurney 
178e4f3fabdSBryan Gurney static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
179e4f3fabdSBryan Gurney {
180e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
181e4f3fabdSBryan Gurney 
182e4f3fabdSBryan Gurney 	if (bblk) {
183e4f3fabdSBryan Gurney 		rb_erase(&bblk->node, &dd->badblocklist);
184e4f3fabdSBryan Gurney 		dd->badblock_count--;
185e4f3fabdSBryan Gurney 		kfree(bblk);
186e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
187e4f3fabdSBryan Gurney 			sector_div(thisblock, dd->sect_per_block);
188e4f3fabdSBryan Gurney 			DMINFO("block %llu removed from badblocklist by write",
189e4f3fabdSBryan Gurney 			       (unsigned long long)thisblock);
190e4f3fabdSBryan Gurney 		}
191e4f3fabdSBryan Gurney 	}
192e4f3fabdSBryan Gurney }
193e4f3fabdSBryan Gurney 
194e4f3fabdSBryan Gurney static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195e4f3fabdSBryan Gurney 			  bool fail_read_on_bb)
196e4f3fabdSBryan Gurney {
197e4f3fabdSBryan Gurney 	unsigned long flags;
198e4f3fabdSBryan Gurney 
199e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
20008c04c84SBryan Gurney 		thisblock >>= dd->sect_per_block_shift;
201e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
202e4f3fabdSBryan Gurney 		__dust_map_write(dd, thisblock);
203e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
204e4f3fabdSBryan Gurney 	}
205e4f3fabdSBryan Gurney 
206e4f3fabdSBryan Gurney 	return DM_MAPIO_REMAPPED;
207e4f3fabdSBryan Gurney }
208e4f3fabdSBryan Gurney 
209e4f3fabdSBryan Gurney static int dust_map(struct dm_target *ti, struct bio *bio)
210e4f3fabdSBryan Gurney {
211e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
212*cc7a7fb3SBryan Gurney 	int r;
213e4f3fabdSBryan Gurney 
214e4f3fabdSBryan Gurney 	bio_set_dev(bio, dd->dev->bdev);
215e4f3fabdSBryan Gurney 	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
216e4f3fabdSBryan Gurney 
217e4f3fabdSBryan Gurney 	if (bio_data_dir(bio) == READ)
218*cc7a7fb3SBryan Gurney 		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
219e4f3fabdSBryan Gurney 	else
220*cc7a7fb3SBryan Gurney 		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
221e4f3fabdSBryan Gurney 
222*cc7a7fb3SBryan Gurney 	return r;
223e4f3fabdSBryan Gurney }
224e4f3fabdSBryan Gurney 
225e4f3fabdSBryan Gurney static bool __dust_clear_badblocks(struct rb_root *tree,
226e4f3fabdSBryan Gurney 				   unsigned long long count)
227e4f3fabdSBryan Gurney {
228e4f3fabdSBryan Gurney 	struct rb_node *node = NULL, *nnode = NULL;
229e4f3fabdSBryan Gurney 
230e4f3fabdSBryan Gurney 	nnode = rb_first(tree);
231e4f3fabdSBryan Gurney 	if (nnode == NULL) {
232e4f3fabdSBryan Gurney 		BUG_ON(count != 0);
233e4f3fabdSBryan Gurney 		return false;
234e4f3fabdSBryan Gurney 	}
235e4f3fabdSBryan Gurney 
236e4f3fabdSBryan Gurney 	while (nnode) {
237e4f3fabdSBryan Gurney 		node = nnode;
238e4f3fabdSBryan Gurney 		nnode = rb_next(node);
239e4f3fabdSBryan Gurney 		rb_erase(node, tree);
240e4f3fabdSBryan Gurney 		count--;
241e4f3fabdSBryan Gurney 		kfree(node);
242e4f3fabdSBryan Gurney 	}
243e4f3fabdSBryan Gurney 	BUG_ON(count != 0);
244e4f3fabdSBryan Gurney 	BUG_ON(tree->rb_node != NULL);
245e4f3fabdSBryan Gurney 
246e4f3fabdSBryan Gurney 	return true;
247e4f3fabdSBryan Gurney }
248e4f3fabdSBryan Gurney 
249e4f3fabdSBryan Gurney static int dust_clear_badblocks(struct dust_device *dd)
250e4f3fabdSBryan Gurney {
251e4f3fabdSBryan Gurney 	unsigned long flags;
252e4f3fabdSBryan Gurney 	struct rb_root badblocklist;
253e4f3fabdSBryan Gurney 	unsigned long long badblock_count;
254e4f3fabdSBryan Gurney 
255e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
256e4f3fabdSBryan Gurney 	badblocklist = dd->badblocklist;
257e4f3fabdSBryan Gurney 	badblock_count = dd->badblock_count;
258e4f3fabdSBryan Gurney 	dd->badblocklist = RB_ROOT;
259e4f3fabdSBryan Gurney 	dd->badblock_count = 0;
260e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
261e4f3fabdSBryan Gurney 
262e4f3fabdSBryan Gurney 	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
263e4f3fabdSBryan Gurney 		DMINFO("%s: no badblocks found", __func__);
264e4f3fabdSBryan Gurney 	else
265e4f3fabdSBryan Gurney 		DMINFO("%s: badblocks cleared", __func__);
266e4f3fabdSBryan Gurney 
267e4f3fabdSBryan Gurney 	return 0;
268e4f3fabdSBryan Gurney }
269e4f3fabdSBryan Gurney 
/*
 * Constructor. Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path, in sectors
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	/* 2097152 sectors == 1 GiB, the documented maximum block size */
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Parse and validate the block size (argv[2]). */
	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	/* A block may not exceed the target length (nor the 1 GiB cap). */
	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	/* Parse the offset (argv[1]); reject trailing junk and overflow. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	/* sect_per_block is a power of 2, so __ffs() yields its log2. */
	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	/* Split incoming I/O on block boundaries. */
	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}
362e4f3fabdSBryan Gurney 
/*
 * Destructor: free all badblock entries, release the underlying
 * device, then the per-target state itself (order matters).
 */
static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}
371e4f3fabdSBryan Gurney 
372e4f3fabdSBryan Gurney static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
373e4f3fabdSBryan Gurney 			char *result_buf, unsigned int maxlen)
374e4f3fabdSBryan Gurney {
375e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
376e4f3fabdSBryan Gurney 	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
377e4f3fabdSBryan Gurney 	bool invalid_msg = false;
3786ec1be50SBryan Gurney 	int r = -EINVAL;
379e4f3fabdSBryan Gurney 	unsigned long long tmp, block;
380e4f3fabdSBryan Gurney 	unsigned long flags;
381e4f3fabdSBryan Gurney 	char dummy;
382e4f3fabdSBryan Gurney 
383e4f3fabdSBryan Gurney 	if (argc == 1) {
384e4f3fabdSBryan Gurney 		if (!strcasecmp(argv[0], "addbadblock") ||
385e4f3fabdSBryan Gurney 		    !strcasecmp(argv[0], "removebadblock") ||
386e4f3fabdSBryan Gurney 		    !strcasecmp(argv[0], "queryblock")) {
387e4f3fabdSBryan Gurney 			DMERR("%s requires an additional argument", argv[0]);
388e4f3fabdSBryan Gurney 		} else if (!strcasecmp(argv[0], "disable")) {
389e4f3fabdSBryan Gurney 			DMINFO("disabling read failures on bad sectors");
390e4f3fabdSBryan Gurney 			dd->fail_read_on_bb = false;
3916ec1be50SBryan Gurney 			r = 0;
392e4f3fabdSBryan Gurney 		} else if (!strcasecmp(argv[0], "enable")) {
393e4f3fabdSBryan Gurney 			DMINFO("enabling read failures on bad sectors");
394e4f3fabdSBryan Gurney 			dd->fail_read_on_bb = true;
3956ec1be50SBryan Gurney 			r = 0;
396e4f3fabdSBryan Gurney 		} else if (!strcasecmp(argv[0], "countbadblocks")) {
397e4f3fabdSBryan Gurney 			spin_lock_irqsave(&dd->dust_lock, flags);
398e4f3fabdSBryan Gurney 			DMINFO("countbadblocks: %llu badblock(s) found",
399e4f3fabdSBryan Gurney 			       dd->badblock_count);
400e4f3fabdSBryan Gurney 			spin_unlock_irqrestore(&dd->dust_lock, flags);
4016ec1be50SBryan Gurney 			r = 0;
402e4f3fabdSBryan Gurney 		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
4036ec1be50SBryan Gurney 			r = dust_clear_badblocks(dd);
404e4f3fabdSBryan Gurney 		} else if (!strcasecmp(argv[0], "quiet")) {
405e4f3fabdSBryan Gurney 			if (!dd->quiet_mode)
406e4f3fabdSBryan Gurney 				dd->quiet_mode = true;
407e4f3fabdSBryan Gurney 			else
408e4f3fabdSBryan Gurney 				dd->quiet_mode = false;
4096ec1be50SBryan Gurney 			r = 0;
410e4f3fabdSBryan Gurney 		} else {
411e4f3fabdSBryan Gurney 			invalid_msg = true;
412e4f3fabdSBryan Gurney 		}
413e4f3fabdSBryan Gurney 	} else if (argc == 2) {
414e4f3fabdSBryan Gurney 		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
4156ec1be50SBryan Gurney 			return r;
416e4f3fabdSBryan Gurney 
417e4f3fabdSBryan Gurney 		block = tmp;
418e4f3fabdSBryan Gurney 		sector_div(size, dd->sect_per_block);
419cacddeabSColin Ian King 		if (block > size) {
420e4f3fabdSBryan Gurney 			DMERR("selected block value out of range");
4216ec1be50SBryan Gurney 			return r;
422e4f3fabdSBryan Gurney 		}
423e4f3fabdSBryan Gurney 
424e4f3fabdSBryan Gurney 		if (!strcasecmp(argv[0], "addbadblock"))
4256ec1be50SBryan Gurney 			r = dust_add_block(dd, block);
426e4f3fabdSBryan Gurney 		else if (!strcasecmp(argv[0], "removebadblock"))
4276ec1be50SBryan Gurney 			r = dust_remove_block(dd, block);
428e4f3fabdSBryan Gurney 		else if (!strcasecmp(argv[0], "queryblock"))
4296ec1be50SBryan Gurney 			r = dust_query_block(dd, block);
430e4f3fabdSBryan Gurney 		else
431e4f3fabdSBryan Gurney 			invalid_msg = true;
432e4f3fabdSBryan Gurney 
433e4f3fabdSBryan Gurney 	} else
434e4f3fabdSBryan Gurney 		DMERR("invalid number of arguments '%d'", argc);
435e4f3fabdSBryan Gurney 
436e4f3fabdSBryan Gurney 	if (invalid_msg)
437e4f3fabdSBryan Gurney 		DMERR("unrecognized message '%s' received", argv[0]);
438e4f3fabdSBryan Gurney 
4396ec1be50SBryan Gurney 	return r;
440e4f3fabdSBryan Gurney }
441e4f3fabdSBryan Gurney 
442e4f3fabdSBryan Gurney static void dust_status(struct dm_target *ti, status_type_t type,
443e4f3fabdSBryan Gurney 			unsigned int status_flags, char *result, unsigned int maxlen)
444e4f3fabdSBryan Gurney {
445e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
446e4f3fabdSBryan Gurney 	unsigned int sz = 0;
447e4f3fabdSBryan Gurney 
448e4f3fabdSBryan Gurney 	switch (type) {
449e4f3fabdSBryan Gurney 	case STATUSTYPE_INFO:
450e4f3fabdSBryan Gurney 		DMEMIT("%s %s %s", dd->dev->name,
451e4f3fabdSBryan Gurney 		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
452e4f3fabdSBryan Gurney 		       dd->quiet_mode ? "quiet" : "verbose");
453e4f3fabdSBryan Gurney 		break;
454e4f3fabdSBryan Gurney 
455e4f3fabdSBryan Gurney 	case STATUSTYPE_TABLE:
456e4f3fabdSBryan Gurney 		DMEMIT("%s %llu %u", dd->dev->name,
457e4f3fabdSBryan Gurney 		       (unsigned long long)dd->start, dd->blksz);
458e4f3fabdSBryan Gurney 		break;
459e4f3fabdSBryan Gurney 	}
460e4f3fabdSBryan Gurney }
461e4f3fabdSBryan Gurney 
462e4f3fabdSBryan Gurney static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
463e4f3fabdSBryan Gurney {
464e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
465e4f3fabdSBryan Gurney 	struct dm_dev *dev = dd->dev;
466e4f3fabdSBryan Gurney 
467e4f3fabdSBryan Gurney 	*bdev = dev->bdev;
468e4f3fabdSBryan Gurney 
469e4f3fabdSBryan Gurney 	/*
470e4f3fabdSBryan Gurney 	 * Only pass ioctls through if the device sizes match exactly.
471e4f3fabdSBryan Gurney 	 */
472e4f3fabdSBryan Gurney 	if (dd->start ||
473e4f3fabdSBryan Gurney 	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
474e4f3fabdSBryan Gurney 		return 1;
475e4f3fabdSBryan Gurney 
476e4f3fabdSBryan Gurney 	return 0;
477e4f3fabdSBryan Gurney }
478e4f3fabdSBryan Gurney 
/*
 * Iterate-devices callback: dust maps linearly onto a single
 * underlying device starting at dd->start.
 */
static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}
486e4f3fabdSBryan Gurney 
/* device-mapper target registration table for the "dust" target. */
static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};
499e4f3fabdSBryan Gurney 
5009ccce5a0SYueHaibing static int __init dm_dust_init(void)
501e4f3fabdSBryan Gurney {
5026ec1be50SBryan Gurney 	int r = dm_register_target(&dust_target);
503e4f3fabdSBryan Gurney 
5046ec1be50SBryan Gurney 	if (r < 0)
5056ec1be50SBryan Gurney 		DMERR("dm_register_target failed %d", r);
506e4f3fabdSBryan Gurney 
5076ec1be50SBryan Gurney 	return r;
508e4f3fabdSBryan Gurney }
509e4f3fabdSBryan Gurney 
/* Module exit: unregister the dust target. */
static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}
514e4f3fabdSBryan Gurney 
/* Module entry/exit hooks and metadata. */
module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
521