1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2018 Red Hat, Inc. 4 * 5 * This is a test "dust" device, which fails reads on specified 6 * sectors, emulating the behavior of a hard disk drive sending 7 * a "Read Medium Error" sense. 8 * 9 */ 10 11 #include <linux/device-mapper.h> 12 #include <linux/module.h> 13 #include <linux/rbtree.h> 14 15 #define DM_MSG_PREFIX "dust" 16 17 struct badblock { 18 struct rb_node node; 19 sector_t bb; 20 }; 21 22 struct dust_device { 23 struct dm_dev *dev; 24 struct rb_root badblocklist; 25 unsigned long long badblock_count; 26 spinlock_t dust_lock; 27 unsigned int blksz; 28 unsigned int sect_per_block; 29 sector_t start; 30 bool fail_read_on_bb:1; 31 bool quiet_mode:1; 32 }; 33 34 static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk) 35 { 36 struct rb_node *node = root->rb_node; 37 38 while (node) { 39 struct badblock *bblk = rb_entry(node, struct badblock, node); 40 41 if (bblk->bb > blk) 42 node = node->rb_left; 43 else if (bblk->bb < blk) 44 node = node->rb_right; 45 else 46 return bblk; 47 } 48 49 return NULL; 50 } 51 52 static bool dust_rb_insert(struct rb_root *root, struct badblock *new) 53 { 54 struct badblock *bblk; 55 struct rb_node **link = &root->rb_node, *parent = NULL; 56 sector_t value = new->bb; 57 58 while (*link) { 59 parent = *link; 60 bblk = rb_entry(parent, struct badblock, node); 61 62 if (bblk->bb > value) 63 link = &(*link)->rb_left; 64 else if (bblk->bb < value) 65 link = &(*link)->rb_right; 66 else 67 return false; 68 } 69 70 rb_link_node(&new->node, parent, link); 71 rb_insert_color(&new->node, root); 72 73 return true; 74 } 75 76 static int dust_remove_block(struct dust_device *dd, unsigned long long block) 77 { 78 struct badblock *bblock; 79 unsigned long flags; 80 81 spin_lock_irqsave(&dd->dust_lock, flags); 82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 84 if (bblock == NULL) { 85 if (!dd->quiet_mode) { 86 DMERR("%s: block %llu not found in badblocklist", 87 __func__, block); 88 } 89 spin_unlock_irqrestore(&dd->dust_lock, flags); 90 return -EINVAL; 91 } 92 93 rb_erase(&bblock->node, &dd->badblocklist); 94 dd->badblock_count--; 95 if (!dd->quiet_mode) 96 DMINFO("%s: badblock removed at block %llu", __func__, block); 97 kfree(bblock); 98 spin_unlock_irqrestore(&dd->dust_lock, flags); 99 100 return 0; 101 } 102 103 static int dust_add_block(struct dust_device *dd, unsigned long long block) 104 { 105 struct badblock *bblock; 106 unsigned long flags; 107 108 bblock = kmalloc(sizeof(*bblock), GFP_KERNEL); 109 if (bblock == NULL) { 110 if (!dd->quiet_mode) 111 DMERR("%s: badblock allocation failed", __func__); 112 return -ENOMEM; 113 } 114 115 spin_lock_irqsave(&dd->dust_lock, flags); 116 bblock->bb = block * dd->sect_per_block; 117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dd->quiet_mode) { 119 DMERR("%s: block %llu already in badblocklist", 120 __func__, block); 121 } 122 spin_unlock_irqrestore(&dd->dust_lock, flags); 123 kfree(bblock); 124 return -EINVAL; 125 } 126 127 dd->badblock_count++; 128 if (!dd->quiet_mode) 129 DMINFO("%s: badblock added at block %llu", __func__, block); 130 spin_unlock_irqrestore(&dd->dust_lock, flags); 131 132 return 0; 133 } 134 135 static int dust_query_block(struct dust_device *dd, unsigned long long block) 136 { 137 struct badblock *bblock; 138 unsigned long flags; 139 140 spin_lock_irqsave(&dd->dust_lock, flags); 141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 if (bblock != NULL) 143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 else 145 DMINFO("%s: block %llu not found in badblocklist", __func__, block); 146 spin_unlock_irqrestore(&dd->dust_lock, flags); 147 148 return 0; 149 } 150 151 static int __dust_map_read(struct dust_device *dd, sector_t thisblock) 152 { 153 struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock); 154 155 if (bblk) 156 return DM_MAPIO_KILL; 157 158 return DM_MAPIO_REMAPPED; 159 } 160 161 static int dust_map_read(struct dust_device *dd, sector_t thisblock, 162 bool fail_read_on_bb) 163 { 164 unsigned long flags; 165 int ret = DM_MAPIO_REMAPPED; 166 167 if (fail_read_on_bb) { 168 spin_lock_irqsave(&dd->dust_lock, flags); 169 ret = __dust_map_read(dd, thisblock); 170 spin_unlock_irqrestore(&dd->dust_lock, flags); 171 } 172 173 return ret; 174 } 175 176 static void __dust_map_write(struct dust_device *dd, sector_t thisblock) 177 { 178 struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock); 179 180 if (bblk) { 181 rb_erase(&bblk->node, &dd->badblocklist); 182 dd->badblock_count--; 183 kfree(bblk); 184 if (!dd->quiet_mode) { 185 sector_div(thisblock, dd->sect_per_block); 186 DMINFO("block %llu removed from badblocklist by write", 187 (unsigned long long)thisblock); 188 } 189 } 190 } 191 192 static int dust_map_write(struct dust_device *dd, sector_t thisblock, 193 bool fail_read_on_bb) 194 { 195 unsigned long flags; 196 197 if (fail_read_on_bb) { 198 spin_lock_irqsave(&dd->dust_lock, flags); 199 __dust_map_write(dd, thisblock); 200 spin_unlock_irqrestore(&dd->dust_lock, flags); 201 } 202 203 return DM_MAPIO_REMAPPED; 204 } 205 206 static int dust_map(struct dm_target *ti, struct bio *bio) 207 { 208 struct dust_device *dd = ti->private; 209 int ret; 210 211 bio_set_dev(bio, dd->dev->bdev); 212 bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector); 213 214 if (bio_data_dir(bio) == READ) 215 ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); 216 else 217 ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); 218 219 return ret; 220 } 221 222 static bool __dust_clear_badblocks(struct rb_root *tree, 223 unsigned long long count) 224 { 225 struct rb_node *node = NULL, *nnode = NULL; 226 227 nnode = rb_first(tree); 228 if (nnode == NULL) { 229 BUG_ON(count != 0); 230 return false; 231 } 232 233 while (nnode) { 234 node = nnode; 235 nnode = rb_next(node); 236 rb_erase(node, tree); 237 count--; 238 kfree(node); 239 } 240 BUG_ON(count != 0); 241 BUG_ON(tree->rb_node != NULL); 242 243 return true; 244 } 245 246 static int dust_clear_badblocks(struct dust_device *dd) 247 { 248 unsigned long flags; 249 struct rb_root badblocklist; 250 unsigned long long badblock_count; 251 252 spin_lock_irqsave(&dd->dust_lock, flags); 253 badblocklist = dd->badblocklist; 254 badblock_count = dd->badblock_count; 255 dd->badblocklist = RB_ROOT; 256 dd->badblock_count = 0; 257 spin_unlock_irqrestore(&dd->dust_lock, flags); 258 259 if (!__dust_clear_badblocks(&badblocklist, badblock_count)) 260 DMINFO("%s: no badblocks found", __func__); 261 else 262 DMINFO("%s: badblocks cleared", __func__); 263 264 return 0; 265 } 266 267 /* 268 * Target parameters: 269 * 270 * <device_path> <offset> <blksz> 271 * 272 * device_path: path to the block device 273 * offset: offset to data area from start of device_path 274 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2) 275 */ 276 static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv) 277 { 278 struct dust_device *dd; 279 unsigned long long tmp; 280 char dummy; 281 unsigned int blksz; 282 unsigned int sect_per_block; 283 sector_t DUST_MAX_BLKSZ_SECTORS = 2097152; 284 sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS); 285 286 if (argc != 3) { 287 ti->error = "Invalid argument count"; 288 return -EINVAL; 289 } 290 291 if (kstrtouint(argv[2], 10, &blksz) || !blksz) { 292 ti->error = "Invalid block size parameter"; 293 return -EINVAL; 294 } 295 296 if (blksz < 512) { 297 ti->error = "Block size must be at least 512"; 298 return -EINVAL; 299 } 300 301 if (!is_power_of_2(blksz)) { 302 ti->error = "Block size must be a power of 2"; 303 return -EINVAL; 304 } 305 306 if (to_sector(blksz) > max_block_sectors) { 307 ti->error = "Block size is too large"; 308 return -EINVAL; 309 } 310 311 sect_per_block = (blksz >> SECTOR_SHIFT); 312 313 if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { 314 ti->error = "Invalid device offset sector"; 315 return -EINVAL; 316 } 317 318 dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL); 319 if (dd == NULL) { 320 ti->error = "Cannot allocate context"; 321 return -ENOMEM; 322 } 323 324 if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) { 325 ti->error = "Device lookup failed"; 326 kfree(dd); 327 return -EINVAL; 328 } 329 330 dd->sect_per_block = sect_per_block; 331 dd->blksz = blksz; 332 dd->start = tmp; 333 334 /* 335 * Whether to fail a read on a "bad" block. 336 * Defaults to false; enabled later by message. 337 */ 338 dd->fail_read_on_bb = false; 339 340 /* 341 * Initialize bad block list rbtree. 342 */ 343 dd->badblocklist = RB_ROOT; 344 dd->badblock_count = 0; 345 spin_lock_init(&dd->dust_lock); 346 347 dd->quiet_mode = false; 348 349 BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0); 350 351 ti->num_discard_bios = 1; 352 ti->num_flush_bios = 1; 353 ti->private = dd; 354 355 return 0; 356 } 357 358 static void dust_dtr(struct dm_target *ti) 359 { 360 struct dust_device *dd = ti->private; 361 362 __dust_clear_badblocks(&dd->badblocklist, dd->badblock_count); 363 dm_put_device(ti, dd->dev); 364 kfree(dd); 365 } 366 367 static int dust_message(struct dm_target *ti, unsigned int argc, char **argv, 368 char *result_buf, unsigned int maxlen) 369 { 370 struct dust_device *dd = ti->private; 371 sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT; 372 bool invalid_msg = false; 373 int result = -EINVAL; 374 unsigned long long tmp, block; 375 unsigned long flags; 376 char dummy; 377 378 if (argc == 1) { 379 if (!strcasecmp(argv[0], "addbadblock") || 380 !strcasecmp(argv[0], "removebadblock") || 381 !strcasecmp(argv[0], "queryblock")) { 382 DMERR("%s requires an additional argument", argv[0]); 383 } else if (!strcasecmp(argv[0], "disable")) { 384 DMINFO("disabling read failures on bad sectors"); 385 dd->fail_read_on_bb = false; 386 result = 0; 387 } else if (!strcasecmp(argv[0], "enable")) { 388 DMINFO("enabling read failures on bad sectors"); 389 dd->fail_read_on_bb = true; 390 result = 0; 391 } else if (!strcasecmp(argv[0], "countbadblocks")) { 392 spin_lock_irqsave(&dd->dust_lock, flags); 393 DMINFO("countbadblocks: %llu badblock(s) found", 394 dd->badblock_count); 395 spin_unlock_irqrestore(&dd->dust_lock, flags); 396 result = 0; 397 } else if (!strcasecmp(argv[0], "clearbadblocks")) { 398 result = dust_clear_badblocks(dd); 399 } else if (!strcasecmp(argv[0], "quiet")) { 400 if (!dd->quiet_mode) 401 dd->quiet_mode = true; 402 else 403 dd->quiet_mode = false; 404 result = 0; 405 } else { 406 invalid_msg = true; 407 } 408 } else if (argc == 2) { 409 if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) 410 return result; 411 412 block = tmp; 413 sector_div(size, dd->sect_per_block); 414 if (block > size) { 415 DMERR("selected block value out of range"); 416 return result; 417 } 418 419 if (!strcasecmp(argv[0], "addbadblock")) 420 result = dust_add_block(dd, block); 421 else if (!strcasecmp(argv[0], "removebadblock")) 422 result = dust_remove_block(dd, block); 423 else if (!strcasecmp(argv[0], "queryblock")) 424 result = dust_query_block(dd, block); 425 else 426 invalid_msg = true; 427 428 } else 429 DMERR("invalid number of arguments '%d'", argc); 430 431 if (invalid_msg) 432 DMERR("unrecognized message '%s' received", argv[0]); 433 434 return result; 435 } 436 437 static void dust_status(struct dm_target *ti, status_type_t type, 438 unsigned int status_flags, char *result, unsigned int maxlen) 439 { 440 struct dust_device *dd = ti->private; 441 unsigned int sz = 0; 442 443 switch (type) { 444 case STATUSTYPE_INFO: 445 DMEMIT("%s %s %s", dd->dev->name, 446 dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass", 447 dd->quiet_mode ? "quiet" : "verbose"); 448 break; 449 450 case STATUSTYPE_TABLE: 451 DMEMIT("%s %llu %u", dd->dev->name, 452 (unsigned long long)dd->start, dd->blksz); 453 break; 454 } 455 } 456 457 static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) 458 { 459 struct dust_device *dd = ti->private; 460 struct dm_dev *dev = dd->dev; 461 462 *bdev = dev->bdev; 463 464 /* 465 * Only pass ioctls through if the device sizes match exactly. 466 */ 467 if (dd->start || 468 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) 469 return 1; 470 471 return 0; 472 } 473 474 static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, 475 void *data) 476 { 477 struct dust_device *dd = ti->private; 478 479 return fn(ti, dd->dev, dd->start, ti->len, data); 480 } 481 482 static struct target_type dust_target = { 483 .name = "dust", 484 .version = {1, 0, 0}, 485 .module = THIS_MODULE, 486 .ctr = dust_ctr, 487 .dtr = dust_dtr, 488 .iterate_devices = dust_iterate_devices, 489 .map = dust_map, 490 .message = dust_message, 491 .status = dust_status, 492 .prepare_ioctl = dust_prepare_ioctl, 493 }; 494 495 static int __init dm_dust_init(void) 496 { 497 int result = dm_register_target(&dust_target); 498 499 if (result < 0) 500 DMERR("dm_register_target failed %d", result); 501 502 return result; 503 } 504 505 static void __exit dm_dust_exit(void) 506 { 507 dm_unregister_target(&dust_target); 508 } 509 510 module_init(dm_dust_init); 511 module_exit(dm_dust_exit); 512 513 MODULE_DESCRIPTION(DM_NAME " dust test target"); 514 MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>"); 515 MODULE_LICENSE("GPL"); 516