xref: /linux/drivers/md/dm-dust.c (revision 72d7df4c8079306e7fbd6243bf951461ed647a47)
1e4f3fabdSBryan Gurney // SPDX-License-Identifier: GPL-2.0
2e4f3fabdSBryan Gurney /*
3e4f3fabdSBryan Gurney  * Copyright (c) 2018 Red Hat, Inc.
4e4f3fabdSBryan Gurney  *
5e4f3fabdSBryan Gurney  * This is a test "dust" device, which fails reads on specified
6e4f3fabdSBryan Gurney  * sectors, emulating the behavior of a hard disk drive sending
7e4f3fabdSBryan Gurney  * a "Read Medium Error" sense.
8e4f3fabdSBryan Gurney  *
9e4f3fabdSBryan Gurney  */
10e4f3fabdSBryan Gurney 
11e4f3fabdSBryan Gurney #include <linux/device-mapper.h>
12e4f3fabdSBryan Gurney #include <linux/module.h>
13e4f3fabdSBryan Gurney #include <linux/rbtree.h>
14e4f3fabdSBryan Gurney 
15e4f3fabdSBryan Gurney #define DM_MSG_PREFIX "dust"
16e4f3fabdSBryan Gurney 
/*
 * One entry in the per-device badblock rbtree, keyed by block number.
 */
struct badblock {
	struct rb_node node;	/* links into dust_device->badblocklist */
	sector_t bb;		/* bad block number, in blksz-sized units */
	unsigned char wr_fail_cnt;	/* writes left to fail before a write clears this entry */
};
22e4f3fabdSBryan Gurney 
/*
 * Per-target state for one "dust" device.
 */
struct dust_device {
	struct dm_dev *dev;		/* underlying block device */
	struct rb_root badblocklist;	/* rbtree of struct badblock, keyed by block */
	unsigned long long badblock_count;	/* number of entries in badblocklist */
	spinlock_t dust_lock;		/* protects badblocklist and badblock_count */
	unsigned int blksz;		/* block size in bytes (power of 2, >= 512) */
	int sect_per_block_shift;	/* log2(sect_per_block), for sector -> block conversion */
	unsigned int sect_per_block;	/* sectors per block (blksz >> SECTOR_SHIFT) */
	sector_t start;			/* offset of the data area on the underlying device */
	bool fail_read_on_bb:1;		/* when set, I/O to listed blocks is failed */
	bool quiet_mode:1;		/* suppress informational log messages */
};
35e4f3fabdSBryan Gurney 
36e4f3fabdSBryan Gurney static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
37e4f3fabdSBryan Gurney {
38e4f3fabdSBryan Gurney 	struct rb_node *node = root->rb_node;
39e4f3fabdSBryan Gurney 
40e4f3fabdSBryan Gurney 	while (node) {
41e4f3fabdSBryan Gurney 		struct badblock *bblk = rb_entry(node, struct badblock, node);
42e4f3fabdSBryan Gurney 
43e4f3fabdSBryan Gurney 		if (bblk->bb > blk)
44e4f3fabdSBryan Gurney 			node = node->rb_left;
45e4f3fabdSBryan Gurney 		else if (bblk->bb < blk)
46e4f3fabdSBryan Gurney 			node = node->rb_right;
47e4f3fabdSBryan Gurney 		else
48e4f3fabdSBryan Gurney 			return bblk;
49e4f3fabdSBryan Gurney 	}
50e4f3fabdSBryan Gurney 
51e4f3fabdSBryan Gurney 	return NULL;
52e4f3fabdSBryan Gurney }
53e4f3fabdSBryan Gurney 
54e4f3fabdSBryan Gurney static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
55e4f3fabdSBryan Gurney {
56e4f3fabdSBryan Gurney 	struct badblock *bblk;
57e4f3fabdSBryan Gurney 	struct rb_node **link = &root->rb_node, *parent = NULL;
58e4f3fabdSBryan Gurney 	sector_t value = new->bb;
59e4f3fabdSBryan Gurney 
60e4f3fabdSBryan Gurney 	while (*link) {
61e4f3fabdSBryan Gurney 		parent = *link;
62e4f3fabdSBryan Gurney 		bblk = rb_entry(parent, struct badblock, node);
63e4f3fabdSBryan Gurney 
64e4f3fabdSBryan Gurney 		if (bblk->bb > value)
65e4f3fabdSBryan Gurney 			link = &(*link)->rb_left;
66e4f3fabdSBryan Gurney 		else if (bblk->bb < value)
67e4f3fabdSBryan Gurney 			link = &(*link)->rb_right;
68e4f3fabdSBryan Gurney 		else
69e4f3fabdSBryan Gurney 			return false;
70e4f3fabdSBryan Gurney 	}
71e4f3fabdSBryan Gurney 
72e4f3fabdSBryan Gurney 	rb_link_node(&new->node, parent, link);
73e4f3fabdSBryan Gurney 	rb_insert_color(&new->node, root);
74e4f3fabdSBryan Gurney 
75e4f3fabdSBryan Gurney 	return true;
76e4f3fabdSBryan Gurney }
77e4f3fabdSBryan Gurney 
78e4f3fabdSBryan Gurney static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79e4f3fabdSBryan Gurney {
80e4f3fabdSBryan Gurney 	struct badblock *bblock;
81e4f3fabdSBryan Gurney 	unsigned long flags;
82e4f3fabdSBryan Gurney 
83e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
8408c04c84SBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block);
85e4f3fabdSBryan Gurney 
86e4f3fabdSBryan Gurney 	if (bblock == NULL) {
87e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
88e4f3fabdSBryan Gurney 			DMERR("%s: block %llu not found in badblocklist",
89e4f3fabdSBryan Gurney 			      __func__, block);
90e4f3fabdSBryan Gurney 		}
91e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
92e4f3fabdSBryan Gurney 		return -EINVAL;
93e4f3fabdSBryan Gurney 	}
94e4f3fabdSBryan Gurney 
95e4f3fabdSBryan Gurney 	rb_erase(&bblock->node, &dd->badblocklist);
96e4f3fabdSBryan Gurney 	dd->badblock_count--;
97e4f3fabdSBryan Gurney 	if (!dd->quiet_mode)
98e4f3fabdSBryan Gurney 		DMINFO("%s: badblock removed at block %llu", __func__, block);
99e4f3fabdSBryan Gurney 	kfree(bblock);
100e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
101e4f3fabdSBryan Gurney 
102e4f3fabdSBryan Gurney 	return 0;
103e4f3fabdSBryan Gurney }
104e4f3fabdSBryan Gurney 
105*72d7df4cSBryan Gurney static int dust_add_block(struct dust_device *dd, unsigned long long block,
106*72d7df4cSBryan Gurney 			  unsigned char wr_fail_cnt)
107e4f3fabdSBryan Gurney {
108e4f3fabdSBryan Gurney 	struct badblock *bblock;
109e4f3fabdSBryan Gurney 	unsigned long flags;
110e4f3fabdSBryan Gurney 
111e4f3fabdSBryan Gurney 	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
112e4f3fabdSBryan Gurney 	if (bblock == NULL) {
113e4f3fabdSBryan Gurney 		if (!dd->quiet_mode)
114e4f3fabdSBryan Gurney 			DMERR("%s: badblock allocation failed", __func__);
115e4f3fabdSBryan Gurney 		return -ENOMEM;
116e4f3fabdSBryan Gurney 	}
117e4f3fabdSBryan Gurney 
118e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
11908c04c84SBryan Gurney 	bblock->bb = block;
120*72d7df4cSBryan Gurney 	bblock->wr_fail_cnt = wr_fail_cnt;
121e4f3fabdSBryan Gurney 	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
122e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
123e4f3fabdSBryan Gurney 			DMERR("%s: block %llu already in badblocklist",
124e4f3fabdSBryan Gurney 			      __func__, block);
125e4f3fabdSBryan Gurney 		}
126e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
127e4f3fabdSBryan Gurney 		kfree(bblock);
128e4f3fabdSBryan Gurney 		return -EINVAL;
129e4f3fabdSBryan Gurney 	}
130e4f3fabdSBryan Gurney 
131e4f3fabdSBryan Gurney 	dd->badblock_count++;
132*72d7df4cSBryan Gurney 	if (!dd->quiet_mode) {
133*72d7df4cSBryan Gurney 		DMINFO("%s: badblock added at block %llu with write fail count %hhu",
134*72d7df4cSBryan Gurney 		       __func__, block, wr_fail_cnt);
135*72d7df4cSBryan Gurney 	}
136e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
137e4f3fabdSBryan Gurney 
138e4f3fabdSBryan Gurney 	return 0;
139e4f3fabdSBryan Gurney }
140e4f3fabdSBryan Gurney 
141e4f3fabdSBryan Gurney static int dust_query_block(struct dust_device *dd, unsigned long long block)
142e4f3fabdSBryan Gurney {
143e4f3fabdSBryan Gurney 	struct badblock *bblock;
144e4f3fabdSBryan Gurney 	unsigned long flags;
145e4f3fabdSBryan Gurney 
146e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
14708c04c84SBryan Gurney 	bblock = dust_rb_search(&dd->badblocklist, block);
148e4f3fabdSBryan Gurney 	if (bblock != NULL)
149e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu found in badblocklist", __func__, block);
150e4f3fabdSBryan Gurney 	else
151e4f3fabdSBryan Gurney 		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
152e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
153e4f3fabdSBryan Gurney 
154e4f3fabdSBryan Gurney 	return 0;
155e4f3fabdSBryan Gurney }
156e4f3fabdSBryan Gurney 
157e4f3fabdSBryan Gurney static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
158e4f3fabdSBryan Gurney {
159e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
160e4f3fabdSBryan Gurney 
161e4f3fabdSBryan Gurney 	if (bblk)
162e4f3fabdSBryan Gurney 		return DM_MAPIO_KILL;
163e4f3fabdSBryan Gurney 
164e4f3fabdSBryan Gurney 	return DM_MAPIO_REMAPPED;
165e4f3fabdSBryan Gurney }
166e4f3fabdSBryan Gurney 
167e4f3fabdSBryan Gurney static int dust_map_read(struct dust_device *dd, sector_t thisblock,
168e4f3fabdSBryan Gurney 			 bool fail_read_on_bb)
169e4f3fabdSBryan Gurney {
170e4f3fabdSBryan Gurney 	unsigned long flags;
171cc7a7fb3SBryan Gurney 	int r = DM_MAPIO_REMAPPED;
172e4f3fabdSBryan Gurney 
173e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
17408c04c84SBryan Gurney 		thisblock >>= dd->sect_per_block_shift;
175e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
176cc7a7fb3SBryan Gurney 		r = __dust_map_read(dd, thisblock);
177e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
178e4f3fabdSBryan Gurney 	}
179e4f3fabdSBryan Gurney 
180cc7a7fb3SBryan Gurney 	return r;
181e4f3fabdSBryan Gurney }
182e4f3fabdSBryan Gurney 
183*72d7df4cSBryan Gurney static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
184e4f3fabdSBryan Gurney {
185e4f3fabdSBryan Gurney 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
186e4f3fabdSBryan Gurney 
187*72d7df4cSBryan Gurney 	if (bblk && bblk->wr_fail_cnt > 0) {
188*72d7df4cSBryan Gurney 		bblk->wr_fail_cnt--;
189*72d7df4cSBryan Gurney 		return DM_MAPIO_KILL;
190*72d7df4cSBryan Gurney 	}
191*72d7df4cSBryan Gurney 
192e4f3fabdSBryan Gurney 	if (bblk) {
193e4f3fabdSBryan Gurney 		rb_erase(&bblk->node, &dd->badblocklist);
194e4f3fabdSBryan Gurney 		dd->badblock_count--;
195e4f3fabdSBryan Gurney 		kfree(bblk);
196e4f3fabdSBryan Gurney 		if (!dd->quiet_mode) {
197e4f3fabdSBryan Gurney 			sector_div(thisblock, dd->sect_per_block);
198e4f3fabdSBryan Gurney 			DMINFO("block %llu removed from badblocklist by write",
199e4f3fabdSBryan Gurney 			       (unsigned long long)thisblock);
200e4f3fabdSBryan Gurney 		}
201e4f3fabdSBryan Gurney 	}
202*72d7df4cSBryan Gurney 
203*72d7df4cSBryan Gurney 	return DM_MAPIO_REMAPPED;
204e4f3fabdSBryan Gurney }
205e4f3fabdSBryan Gurney 
206e4f3fabdSBryan Gurney static int dust_map_write(struct dust_device *dd, sector_t thisblock,
207e4f3fabdSBryan Gurney 			  bool fail_read_on_bb)
208e4f3fabdSBryan Gurney {
209e4f3fabdSBryan Gurney 	unsigned long flags;
210*72d7df4cSBryan Gurney 	int ret = DM_MAPIO_REMAPPED;
211e4f3fabdSBryan Gurney 
212e4f3fabdSBryan Gurney 	if (fail_read_on_bb) {
21308c04c84SBryan Gurney 		thisblock >>= dd->sect_per_block_shift;
214e4f3fabdSBryan Gurney 		spin_lock_irqsave(&dd->dust_lock, flags);
215*72d7df4cSBryan Gurney 		ret = __dust_map_write(dd, thisblock);
216e4f3fabdSBryan Gurney 		spin_unlock_irqrestore(&dd->dust_lock, flags);
217e4f3fabdSBryan Gurney 	}
218e4f3fabdSBryan Gurney 
219*72d7df4cSBryan Gurney 	return ret;
220e4f3fabdSBryan Gurney }
221e4f3fabdSBryan Gurney 
222e4f3fabdSBryan Gurney static int dust_map(struct dm_target *ti, struct bio *bio)
223e4f3fabdSBryan Gurney {
224e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
225cc7a7fb3SBryan Gurney 	int r;
226e4f3fabdSBryan Gurney 
227e4f3fabdSBryan Gurney 	bio_set_dev(bio, dd->dev->bdev);
228e4f3fabdSBryan Gurney 	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
229e4f3fabdSBryan Gurney 
230e4f3fabdSBryan Gurney 	if (bio_data_dir(bio) == READ)
231cc7a7fb3SBryan Gurney 		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
232e4f3fabdSBryan Gurney 	else
233cc7a7fb3SBryan Gurney 		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
234e4f3fabdSBryan Gurney 
235cc7a7fb3SBryan Gurney 	return r;
236e4f3fabdSBryan Gurney }
237e4f3fabdSBryan Gurney 
238e4f3fabdSBryan Gurney static bool __dust_clear_badblocks(struct rb_root *tree,
239e4f3fabdSBryan Gurney 				   unsigned long long count)
240e4f3fabdSBryan Gurney {
241e4f3fabdSBryan Gurney 	struct rb_node *node = NULL, *nnode = NULL;
242e4f3fabdSBryan Gurney 
243e4f3fabdSBryan Gurney 	nnode = rb_first(tree);
244e4f3fabdSBryan Gurney 	if (nnode == NULL) {
245e4f3fabdSBryan Gurney 		BUG_ON(count != 0);
246e4f3fabdSBryan Gurney 		return false;
247e4f3fabdSBryan Gurney 	}
248e4f3fabdSBryan Gurney 
249e4f3fabdSBryan Gurney 	while (nnode) {
250e4f3fabdSBryan Gurney 		node = nnode;
251e4f3fabdSBryan Gurney 		nnode = rb_next(node);
252e4f3fabdSBryan Gurney 		rb_erase(node, tree);
253e4f3fabdSBryan Gurney 		count--;
254e4f3fabdSBryan Gurney 		kfree(node);
255e4f3fabdSBryan Gurney 	}
256e4f3fabdSBryan Gurney 	BUG_ON(count != 0);
257e4f3fabdSBryan Gurney 	BUG_ON(tree->rb_node != NULL);
258e4f3fabdSBryan Gurney 
259e4f3fabdSBryan Gurney 	return true;
260e4f3fabdSBryan Gurney }
261e4f3fabdSBryan Gurney 
262e4f3fabdSBryan Gurney static int dust_clear_badblocks(struct dust_device *dd)
263e4f3fabdSBryan Gurney {
264e4f3fabdSBryan Gurney 	unsigned long flags;
265e4f3fabdSBryan Gurney 	struct rb_root badblocklist;
266e4f3fabdSBryan Gurney 	unsigned long long badblock_count;
267e4f3fabdSBryan Gurney 
268e4f3fabdSBryan Gurney 	spin_lock_irqsave(&dd->dust_lock, flags);
269e4f3fabdSBryan Gurney 	badblocklist = dd->badblocklist;
270e4f3fabdSBryan Gurney 	badblock_count = dd->badblock_count;
271e4f3fabdSBryan Gurney 	dd->badblocklist = RB_ROOT;
272e4f3fabdSBryan Gurney 	dd->badblock_count = 0;
273e4f3fabdSBryan Gurney 	spin_unlock_irqrestore(&dd->dust_lock, flags);
274e4f3fabdSBryan Gurney 
275e4f3fabdSBryan Gurney 	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
276e4f3fabdSBryan Gurney 		DMINFO("%s: no badblocks found", __func__);
277e4f3fabdSBryan Gurney 	else
278e4f3fabdSBryan Gurney 		DMINFO("%s: badblocks cleared", __func__);
279e4f3fabdSBryan Gurney 
280e4f3fabdSBryan Gurney 	return 0;
281e4f3fabdSBryan Gurney }
282e4f3fabdSBryan Gurney 
283e4f3fabdSBryan Gurney /*
284e4f3fabdSBryan Gurney  * Target parameters:
285e4f3fabdSBryan Gurney  *
286e4f3fabdSBryan Gurney  * <device_path> <offset> <blksz>
287e4f3fabdSBryan Gurney  *
288e4f3fabdSBryan Gurney  * device_path: path to the block device
289e4f3fabdSBryan Gurney  * offset: offset to data area from start of device_path
290e4f3fabdSBryan Gurney  * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
291e4f3fabdSBryan Gurney  */
/*
 * Target constructor: validate the three table arguments, open the
 * underlying device, and initialize per-target state. Returns 0 on
 * success or a negative errno with ti->error set.
 */
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	/* Largest permitted block size: 2097152 sectors = 1 GiB. */
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	/* A block may not exceed the target length either. */
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* argv[2]: block size in bytes. */
	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	/* Power-of-2 requirement lets sector->block use a shift. */
	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	/* argv[1]: device offset in sectors; must fit in sector_t. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	/* argv[0]: path to the underlying block device. */
	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	/* sect_per_block is a power of 2, so __ffs() yields log2. */
	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	/* Split incoming I/O on block boundaries. */
	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}
375e4f3fabdSBryan Gurney 
/*
 * Target destructor: free all badblock entries, release the
 * underlying device, and free the per-target state.
 */
static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}
384e4f3fabdSBryan Gurney 
/*
 * Target message handler. Accepted messages:
 *
 *   1 arg:  disable | enable | countbadblocks | clearbadblocks | quiet
 *   2 args: addbadblock <block> | removebadblock <block> | queryblock <block>
 *   3 args: addbadblock <block> <wr_fail_cnt>
 *
 * Returns 0 on success, -EINVAL on parse errors or unknown messages.
 */
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result_buf, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	/* Device size in sectors; converted to blocks before range checks. */
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	char dummy;	/* catches trailing garbage in sscanf */

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMINFO("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 0;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd);
		} else if (!strcasecmp(argv[0], "quiet")) {
			/* Toggle quiet mode on each "quiet" message. */
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			r = 0;
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		/*
		 * NOTE(review): valid block indices appear to be
		 * 0..size-1, so this looks like it should be
		 * "block >= size" — confirm before changing, as users
		 * may rely on the current bound.
		 */
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		/* Write fail count must fit in an unsigned char. */
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}
480e4f3fabdSBryan Gurney 
481e4f3fabdSBryan Gurney static void dust_status(struct dm_target *ti, status_type_t type,
482e4f3fabdSBryan Gurney 			unsigned int status_flags, char *result, unsigned int maxlen)
483e4f3fabdSBryan Gurney {
484e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
485e4f3fabdSBryan Gurney 	unsigned int sz = 0;
486e4f3fabdSBryan Gurney 
487e4f3fabdSBryan Gurney 	switch (type) {
488e4f3fabdSBryan Gurney 	case STATUSTYPE_INFO:
489e4f3fabdSBryan Gurney 		DMEMIT("%s %s %s", dd->dev->name,
490e4f3fabdSBryan Gurney 		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
491e4f3fabdSBryan Gurney 		       dd->quiet_mode ? "quiet" : "verbose");
492e4f3fabdSBryan Gurney 		break;
493e4f3fabdSBryan Gurney 
494e4f3fabdSBryan Gurney 	case STATUSTYPE_TABLE:
495e4f3fabdSBryan Gurney 		DMEMIT("%s %llu %u", dd->dev->name,
496e4f3fabdSBryan Gurney 		       (unsigned long long)dd->start, dd->blksz);
497e4f3fabdSBryan Gurney 		break;
498e4f3fabdSBryan Gurney 	}
499e4f3fabdSBryan Gurney }
500e4f3fabdSBryan Gurney 
501e4f3fabdSBryan Gurney static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
502e4f3fabdSBryan Gurney {
503e4f3fabdSBryan Gurney 	struct dust_device *dd = ti->private;
504e4f3fabdSBryan Gurney 	struct dm_dev *dev = dd->dev;
505e4f3fabdSBryan Gurney 
506e4f3fabdSBryan Gurney 	*bdev = dev->bdev;
507e4f3fabdSBryan Gurney 
508e4f3fabdSBryan Gurney 	/*
509e4f3fabdSBryan Gurney 	 * Only pass ioctls through if the device sizes match exactly.
510e4f3fabdSBryan Gurney 	 */
511e4f3fabdSBryan Gurney 	if (dd->start ||
512e4f3fabdSBryan Gurney 	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
513e4f3fabdSBryan Gurney 		return 1;
514e4f3fabdSBryan Gurney 
515e4f3fabdSBryan Gurney 	return 0;
516e4f3fabdSBryan Gurney }
517e4f3fabdSBryan Gurney 
/*
 * Target device iterator: report the single underlying device span
 * to the device-mapper core via @fn.
 */
static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}
525e4f3fabdSBryan Gurney 
/*
 * device-mapper registration for the "dust" target type.
 */
static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};
538e4f3fabdSBryan Gurney 
5399ccce5a0SYueHaibing static int __init dm_dust_init(void)
540e4f3fabdSBryan Gurney {
5416ec1be50SBryan Gurney 	int r = dm_register_target(&dust_target);
542e4f3fabdSBryan Gurney 
5436ec1be50SBryan Gurney 	if (r < 0)
5446ec1be50SBryan Gurney 		DMERR("dm_register_target failed %d", r);
545e4f3fabdSBryan Gurney 
5466ec1be50SBryan Gurney 	return r;
547e4f3fabdSBryan Gurney }
548e4f3fabdSBryan Gurney 
/*
 * Module exit: unregister the dust target.
 */
static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}
553e4f3fabdSBryan Gurney 
/* Module entry/exit hooks and metadata. */
module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
560