13bd94003SHeinz Mauelshagen // SPDX-License-Identifier: GPL-2.0-only
2e4f3fabdSBryan Gurney /*
3e4f3fabdSBryan Gurney * Copyright (c) 2018 Red Hat, Inc.
4e4f3fabdSBryan Gurney *
5e4f3fabdSBryan Gurney * This is a test "dust" device, which fails reads on specified
6e4f3fabdSBryan Gurney * sectors, emulating the behavior of a hard disk drive sending
7e4f3fabdSBryan Gurney * a "Read Medium Error" sense.
8e4f3fabdSBryan Gurney *
9e4f3fabdSBryan Gurney */
10e4f3fabdSBryan Gurney
11e4f3fabdSBryan Gurney #include <linux/device-mapper.h>
12e4f3fabdSBryan Gurney #include <linux/module.h>
13e4f3fabdSBryan Gurney #include <linux/rbtree.h>
14e4f3fabdSBryan Gurney
15e4f3fabdSBryan Gurney #define DM_MSG_PREFIX "dust"
16e4f3fabdSBryan Gurney
/*
 * One "bad" block entry, kept in the per-target rbtree keyed by block
 * number.
 */
struct badblock {
	struct rb_node node;		/* linkage in dust_device->badblocklist */
	sector_t bb;			/* block number (in blksz units, not sectors) */
	unsigned char wr_fail_cnt;	/* writes left to fail; 0 => first write clears the block */
};
22e4f3fabdSBryan Gurney
/*
 * Per-target context for a "dust" device.
 */
struct dust_device {
	struct dm_dev *dev;			/* underlying block device */
	struct rb_root badblocklist;		/* rbtree of struct badblock, keyed by block */
	unsigned long long badblock_count;	/* number of entries in badblocklist */
	spinlock_t dust_lock;			/* protects badblocklist and badblock_count */
	unsigned int blksz;			/* emulated block size in bytes (power of 2, >= 512) */
	int sect_per_block_shift;		/* log2(sect_per_block), for fast sector->block */
	unsigned int sect_per_block;		/* sectors per emulated block (blksz >> SECTOR_SHIFT) */
	sector_t start;				/* offset into dev where the data area begins */
	bool fail_read_on_bb:1;			/* fail reads on bad blocks (toggled via message) */
	bool quiet_mode:1;			/* suppress informational log output */
};
35e4f3fabdSBryan Gurney
/*
 * Look up the badblock entry for @blk in @root.
 *
 * Returns the matching struct badblock, or NULL if @blk is not present.
 * Caller must hold dust_lock.
 */
static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *cur = root->rb_node;

	while (cur) {
		struct badblock *entry = rb_entry(cur, struct badblock, node);

		if (blk < entry->bb)
			cur = cur->rb_left;
		else if (blk > entry->bb)
			cur = cur->rb_right;
		else
			return entry;
	}

	return NULL;
}
53e4f3fabdSBryan Gurney
dust_rb_insert(struct rb_root * root,struct badblock * new)54e4f3fabdSBryan Gurney static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
55e4f3fabdSBryan Gurney {
56e4f3fabdSBryan Gurney struct badblock *bblk;
57e4f3fabdSBryan Gurney struct rb_node **link = &root->rb_node, *parent = NULL;
58e4f3fabdSBryan Gurney sector_t value = new->bb;
59e4f3fabdSBryan Gurney
60e4f3fabdSBryan Gurney while (*link) {
61e4f3fabdSBryan Gurney parent = *link;
62e4f3fabdSBryan Gurney bblk = rb_entry(parent, struct badblock, node);
63e4f3fabdSBryan Gurney
64e4f3fabdSBryan Gurney if (bblk->bb > value)
65e4f3fabdSBryan Gurney link = &(*link)->rb_left;
66e4f3fabdSBryan Gurney else if (bblk->bb < value)
67e4f3fabdSBryan Gurney link = &(*link)->rb_right;
68e4f3fabdSBryan Gurney else
69e4f3fabdSBryan Gurney return false;
70e4f3fabdSBryan Gurney }
71e4f3fabdSBryan Gurney
72e4f3fabdSBryan Gurney rb_link_node(&new->node, parent, link);
73e4f3fabdSBryan Gurney rb_insert_color(&new->node, root);
74e4f3fabdSBryan Gurney
75e4f3fabdSBryan Gurney return true;
76e4f3fabdSBryan Gurney }
77e4f3fabdSBryan Gurney
dust_remove_block(struct dust_device * dd,unsigned long long block)78e4f3fabdSBryan Gurney static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79e4f3fabdSBryan Gurney {
80e4f3fabdSBryan Gurney struct badblock *bblock;
81e4f3fabdSBryan Gurney unsigned long flags;
82e4f3fabdSBryan Gurney
83e4f3fabdSBryan Gurney spin_lock_irqsave(&dd->dust_lock, flags);
8408c04c84SBryan Gurney bblock = dust_rb_search(&dd->badblocklist, block);
85e4f3fabdSBryan Gurney
86e4f3fabdSBryan Gurney if (bblock == NULL) {
87e4f3fabdSBryan Gurney if (!dd->quiet_mode) {
88e4f3fabdSBryan Gurney DMERR("%s: block %llu not found in badblocklist",
89e4f3fabdSBryan Gurney __func__, block);
90e4f3fabdSBryan Gurney }
91e4f3fabdSBryan Gurney spin_unlock_irqrestore(&dd->dust_lock, flags);
92e4f3fabdSBryan Gurney return -EINVAL;
93e4f3fabdSBryan Gurney }
94e4f3fabdSBryan Gurney
95e4f3fabdSBryan Gurney rb_erase(&bblock->node, &dd->badblocklist);
96e4f3fabdSBryan Gurney dd->badblock_count--;
97e4f3fabdSBryan Gurney if (!dd->quiet_mode)
98e4f3fabdSBryan Gurney DMINFO("%s: badblock removed at block %llu", __func__, block);
99e4f3fabdSBryan Gurney kfree(bblock);
100e4f3fabdSBryan Gurney spin_unlock_irqrestore(&dd->dust_lock, flags);
101e4f3fabdSBryan Gurney
102e4f3fabdSBryan Gurney return 0;
103e4f3fabdSBryan Gurney }
104e4f3fabdSBryan Gurney
dust_add_block(struct dust_device * dd,unsigned long long block,unsigned char wr_fail_cnt)10572d7df4cSBryan Gurney static int dust_add_block(struct dust_device *dd, unsigned long long block,
10672d7df4cSBryan Gurney unsigned char wr_fail_cnt)
107e4f3fabdSBryan Gurney {
108e4f3fabdSBryan Gurney struct badblock *bblock;
109e4f3fabdSBryan Gurney unsigned long flags;
110e4f3fabdSBryan Gurney
111e4f3fabdSBryan Gurney bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
112e4f3fabdSBryan Gurney if (bblock == NULL) {
113e4f3fabdSBryan Gurney if (!dd->quiet_mode)
114e4f3fabdSBryan Gurney DMERR("%s: badblock allocation failed", __func__);
115e4f3fabdSBryan Gurney return -ENOMEM;
116e4f3fabdSBryan Gurney }
117e4f3fabdSBryan Gurney
118e4f3fabdSBryan Gurney spin_lock_irqsave(&dd->dust_lock, flags);
11908c04c84SBryan Gurney bblock->bb = block;
12072d7df4cSBryan Gurney bblock->wr_fail_cnt = wr_fail_cnt;
121e4f3fabdSBryan Gurney if (!dust_rb_insert(&dd->badblocklist, bblock)) {
122e4f3fabdSBryan Gurney if (!dd->quiet_mode) {
123e4f3fabdSBryan Gurney DMERR("%s: block %llu already in badblocklist",
124e4f3fabdSBryan Gurney __func__, block);
125e4f3fabdSBryan Gurney }
126e4f3fabdSBryan Gurney spin_unlock_irqrestore(&dd->dust_lock, flags);
127e4f3fabdSBryan Gurney kfree(bblock);
128e4f3fabdSBryan Gurney return -EINVAL;
129e4f3fabdSBryan Gurney }
130e4f3fabdSBryan Gurney
131e4f3fabdSBryan Gurney dd->badblock_count++;
13272d7df4cSBryan Gurney if (!dd->quiet_mode) {
133892c7a77STom Rix DMINFO("%s: badblock added at block %llu with write fail count %u",
13472d7df4cSBryan Gurney __func__, block, wr_fail_cnt);
13572d7df4cSBryan Gurney }
136e4f3fabdSBryan Gurney spin_unlock_irqrestore(&dd->dust_lock, flags);
137e4f3fabdSBryan Gurney
138e4f3fabdSBryan Gurney return 0;
139e4f3fabdSBryan Gurney }
140e4f3fabdSBryan Gurney
/*
 * "queryblock" message handler: report via @result whether @block is
 * currently in the bad block list.
 *
 * NOTE(review): sz is read from *sz_ptr but the updated value is never
 * written back; callers pass sz == 0 and do not reuse it afterwards —
 * confirm that is intentional.
 *
 * Always returns 1 so the message framework emits @result to userspace.
 */
static int dust_query_block(struct dust_device *dd, unsigned long long block, char *result,
			    unsigned int maxlen, unsigned int *sz_ptr)
{
	struct badblock *bblock;
	unsigned long flags;
	unsigned int sz = *sz_ptr;	/* DMEMIT() expects locals named sz/result/maxlen */

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMEMIT("%s: block %llu found in badblocklist", __func__, block);
	else
		DMEMIT("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 1;
}
158e4f3fabdSBryan Gurney
/*
 * Decide the fate of a read to @thisblock (in block units): kill the
 * I/O if the block is on the bad block list, otherwise remap it.
 * Caller must hold dust_lock.
 */
static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	return dust_rb_search(&dd->badblocklist, thisblock) ?
		DM_MAPIO_KILL : DM_MAPIO_REMAPPED;
}
168e4f3fabdSBryan Gurney
/*
 * Map a read bio: when read failures are enabled, convert the sector
 * to a block number and consult the bad block list under the lock.
 */
static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int ret = DM_MAPIO_REMAPPED;

	if (!fail_read_on_bb)
		return ret;

	thisblock >>= dd->sect_per_block_shift;
	spin_lock_irqsave(&dd->dust_lock, flags);
	ret = __dust_map_read(dd, thisblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return ret;
}
184e4f3fabdSBryan Gurney
/*
 * Decide the fate of a write to @thisblock (already converted to block
 * units by the caller's shift).
 *
 * If the block is on the bad block list with a nonzero write fail
 * count, fail the write and decrement the count.  Otherwise a write
 * "repairs" the block: it is removed from the list.
 *
 * Caller must hold dust_lock.
 */
static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk && bblk->wr_fail_cnt > 0) {
		bblk->wr_fail_cnt--;
		return DM_MAPIO_KILL;
	}

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			/*
			 * thisblock is already a block number (the caller
			 * shifted it by sect_per_block_shift), so log it
			 * directly.  Dividing by sect_per_block again here
			 * (as the old code did) reported the wrong block
			 * whenever sect_per_block > 1.
			 */
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}

	return DM_MAPIO_REMAPPED;
}
207e4f3fabdSBryan Gurney
/*
 * Map a write bio: when the bad block policy is enabled, convert the
 * sector to a block number and apply write-failure / repair-on-write
 * handling under the lock.
 */
static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;
	int ret = DM_MAPIO_REMAPPED;

	if (!fail_read_on_bb)
		return ret;

	thisblock >>= dd->sect_per_block_shift;
	spin_lock_irqsave(&dd->dust_lock, flags);
	ret = __dust_map_write(dd, thisblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return ret;
}
223e4f3fabdSBryan Gurney
dust_map(struct dm_target * ti,struct bio * bio)224e4f3fabdSBryan Gurney static int dust_map(struct dm_target *ti, struct bio *bio)
225e4f3fabdSBryan Gurney {
226e4f3fabdSBryan Gurney struct dust_device *dd = ti->private;
227cc7a7fb3SBryan Gurney int r;
228e4f3fabdSBryan Gurney
229e4f3fabdSBryan Gurney bio_set_dev(bio, dd->dev->bdev);
230e4f3fabdSBryan Gurney bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
231e4f3fabdSBryan Gurney
232e4f3fabdSBryan Gurney if (bio_data_dir(bio) == READ)
233cc7a7fb3SBryan Gurney r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
234e4f3fabdSBryan Gurney else
235cc7a7fb3SBryan Gurney r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
236e4f3fabdSBryan Gurney
237cc7a7fb3SBryan Gurney return r;
238e4f3fabdSBryan Gurney }
239e4f3fabdSBryan Gurney
__dust_clear_badblocks(struct rb_root * tree,unsigned long long count)240e4f3fabdSBryan Gurney static bool __dust_clear_badblocks(struct rb_root *tree,
241e4f3fabdSBryan Gurney unsigned long long count)
242e4f3fabdSBryan Gurney {
243e4f3fabdSBryan Gurney struct rb_node *node = NULL, *nnode = NULL;
244e4f3fabdSBryan Gurney
245e4f3fabdSBryan Gurney nnode = rb_first(tree);
246e4f3fabdSBryan Gurney if (nnode == NULL) {
247e4f3fabdSBryan Gurney BUG_ON(count != 0);
248e4f3fabdSBryan Gurney return false;
249e4f3fabdSBryan Gurney }
250e4f3fabdSBryan Gurney
251e4f3fabdSBryan Gurney while (nnode) {
252e4f3fabdSBryan Gurney node = nnode;
253e4f3fabdSBryan Gurney nnode = rb_next(node);
254e4f3fabdSBryan Gurney rb_erase(node, tree);
255e4f3fabdSBryan Gurney count--;
256e4f3fabdSBryan Gurney kfree(node);
257e4f3fabdSBryan Gurney }
258e4f3fabdSBryan Gurney BUG_ON(count != 0);
259e4f3fabdSBryan Gurney BUG_ON(tree->rb_node != NULL);
260e4f3fabdSBryan Gurney
261e4f3fabdSBryan Gurney return true;
262e4f3fabdSBryan Gurney }
263e4f3fabdSBryan Gurney
dust_clear_badblocks(struct dust_device * dd,char * result,unsigned int maxlen,unsigned int * sz_ptr)2644f7f590bSyangerkun static int dust_clear_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
2654f7f590bSyangerkun unsigned int *sz_ptr)
266e4f3fabdSBryan Gurney {
267e4f3fabdSBryan Gurney unsigned long flags;
268e4f3fabdSBryan Gurney struct rb_root badblocklist;
269e4f3fabdSBryan Gurney unsigned long long badblock_count;
2704f7f590bSyangerkun unsigned int sz = *sz_ptr;
271e4f3fabdSBryan Gurney
272e4f3fabdSBryan Gurney spin_lock_irqsave(&dd->dust_lock, flags);
273e4f3fabdSBryan Gurney badblocklist = dd->badblocklist;
274e4f3fabdSBryan Gurney badblock_count = dd->badblock_count;
275e4f3fabdSBryan Gurney dd->badblocklist = RB_ROOT;
276e4f3fabdSBryan Gurney dd->badblock_count = 0;
277e4f3fabdSBryan Gurney spin_unlock_irqrestore(&dd->dust_lock, flags);
278e4f3fabdSBryan Gurney
279e4f3fabdSBryan Gurney if (!__dust_clear_badblocks(&badblocklist, badblock_count))
2804f7f590bSyangerkun DMEMIT("%s: no badblocks found", __func__);
281e4f3fabdSBryan Gurney else
2824f7f590bSyangerkun DMEMIT("%s: badblocks cleared", __func__);
283e4f3fabdSBryan Gurney
2844f7f590bSyangerkun return 1;
285e4f3fabdSBryan Gurney }
286e4f3fabdSBryan Gurney
dust_list_badblocks(struct dust_device * dd,char * result,unsigned int maxlen,unsigned int * sz_ptr)2870c248ea2Syangerkun static int dust_list_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
2880c248ea2Syangerkun unsigned int *sz_ptr)
2890c248ea2Syangerkun {
2900c248ea2Syangerkun unsigned long flags;
2910c248ea2Syangerkun struct rb_root badblocklist;
2920c248ea2Syangerkun struct rb_node *node;
2930c248ea2Syangerkun struct badblock *bblk;
2940c248ea2Syangerkun unsigned int sz = *sz_ptr;
2950c248ea2Syangerkun unsigned long long num = 0;
2960c248ea2Syangerkun
2970c248ea2Syangerkun spin_lock_irqsave(&dd->dust_lock, flags);
2980c248ea2Syangerkun badblocklist = dd->badblocklist;
2990c248ea2Syangerkun for (node = rb_first(&badblocklist); node; node = rb_next(node)) {
3000c248ea2Syangerkun bblk = rb_entry(node, struct badblock, node);
3010c248ea2Syangerkun DMEMIT("%llu\n", bblk->bb);
3020c248ea2Syangerkun num++;
3030c248ea2Syangerkun }
3040c248ea2Syangerkun
3050c248ea2Syangerkun spin_unlock_irqrestore(&dd->dust_lock, flags);
3060c248ea2Syangerkun if (!num)
3070c248ea2Syangerkun DMEMIT("No blocks in badblocklist");
3080c248ea2Syangerkun
3090c248ea2Syangerkun return 1;
3100c248ea2Syangerkun }
3110c248ea2Syangerkun
/*
 * Construct a dust target.
 *
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 *
 * Returns 0 on success, or a negative errno with ti->error set.
 */
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	/* 2097152 sectors == 1 GiB: upper bound for the emulated block size */
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Validate the block size before touching any other argument. */
	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	/* A block may not exceed the target length (nor the 1 GiB cap). */
	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	/* tmp != (sector_t)tmp guards against overflow on 32-bit sector_t. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	/* sect_per_block is a power of 2, so __ffs() yields its log2. */
	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	/* Cannot fail: sect_per_block is a power of 2 within range. */
	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}
404e4f3fabdSBryan Gurney
/*
 * Destroy the target: free all badblock entries, then release the
 * underlying device and the per-target context.
 */
static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}
413e4f3fabdSBryan Gurney
/*
 * Handle a "dmsetup message" sent to the target.
 *
 * One argument:   enable | disable | countbadblocks | clearbadblocks |
 *                 listbadblocks | quiet
 * Two arguments:  addbadblock <block> | removebadblock <block> |
 *                 queryblock <block>
 * Three arguments: addbadblock <block> <wr_fail_cnt>
 *
 * Returns 0 or 1 on success (1 = @result carries output for userspace),
 * or a negative errno on failure.
 */
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = bdev_nr_sectors(dd->dev->bdev);
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	unsigned int sz = 0;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMEMIT("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 1;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd, result, maxlen, &sz);
		} else if (!strcasecmp(argv[0], "quiet")) {
			/* Toggle quiet mode. */
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "listbadblocks")) {
			r = dust_list_badblocks(dd, result, maxlen, &sz);
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		/* size becomes the device size in blocks. */
		sector_div(size, dd->sect_per_block);
		/*
		 * NOTE(review): '>' (rather than '>=') also accepts index
		 * == size, which looks like it covers a trailing partial
		 * block when the device size is not a multiple of blksz —
		 * confirm that is intentional.
		 */
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block, result, maxlen, &sz);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		/* wr_fail_cnt must fit the unsigned char field. */
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		/* size becomes the device size in blocks. */
		sector_div(size, dd->sect_per_block);
		/*
		 * NOTE(review): same '>' vs '>=' question as in the
		 * two-argument path above.
		 */
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}
512e4f3fabdSBryan Gurney
/*
 * Report target status.
 *
 * INFO:  <dev_name> <fail_read_on_bad_block|bypass> <quiet|verbose>
 * TABLE: <dev_name> <start> <blksz>  (the constructor arguments)
 * IMA:   no measurement data; emit an empty string.
 */
static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}
536e4f3fabdSBryan Gurney
dust_prepare_ioctl(struct dm_target * ti,struct block_device ** bdev)537e4f3fabdSBryan Gurney static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
538e4f3fabdSBryan Gurney {
539e4f3fabdSBryan Gurney struct dust_device *dd = ti->private;
540e4f3fabdSBryan Gurney struct dm_dev *dev = dd->dev;
541e4f3fabdSBryan Gurney
542e4f3fabdSBryan Gurney *bdev = dev->bdev;
543e4f3fabdSBryan Gurney
544e4f3fabdSBryan Gurney /*
545e4f3fabdSBryan Gurney * Only pass ioctls through if the device sizes match exactly.
546e4f3fabdSBryan Gurney */
5476dcbb52cSChristoph Hellwig if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
548e4f3fabdSBryan Gurney return 1;
549e4f3fabdSBryan Gurney
550e4f3fabdSBryan Gurney return 0;
551e4f3fabdSBryan Gurney }
552e4f3fabdSBryan Gurney
/*
 * Report the single underlying device (and the mapped range) to the
 * device-mapper core for table validation.
 */
static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}
560e4f3fabdSBryan Gurney
/* Target registration table for the "dust" target, version 1.0.0. */
static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};
/* Register/unregister the target at module init/exit. */
module_dm(dust);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
578