/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned up_interval;
	unsigned down_interval;
	unsigned long flags;
	unsigned corrupt_bio_byte;
	unsigned corrupt_bio_rw;
	unsigned corrupt_bio_value;
	unsigned corrupt_bio_flags;
};

enum feature_flag_bits {
	DROP_WRITES,
	ERROR_WRITES
};

struct per_bio_data {
	bool bio_submitted;
};

static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (!strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (!strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 * Feature args:
 *   [drop_writes]
 *   [error_writes]
 *   [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 * Nth_byte starts from 1 for the first byte.
 * Direction is r for READ or w for WRITE.
 * bio_flags is ignored if 0.
 */
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio_set_dev(bio, fc->dev->bdev);
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
		bio->bi_iter.bi_sector =
			flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	struct bvec_iter iter;
	struct bio_vec bvec;

	if (!bio_has_data(bio))
		return;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			char *segment = (page_address(bio_iter_page(bio, iter))
					 + bio_iter_offset(bio, iter));
			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_submitted = false;

	/* Do not fail reset zone */
	if (bio_op(bio) == REQ_OP_ZONE_RESET)
		goto map_bio;

	/* Are we alive? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads if none of corrupt_bio_byte, drop_writes or error_writes is set.
		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
			    !test_bit(ERROR_WRITES, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_bio_data(bio, fc);
			goto map_bio;
		}

		/*
		 * By default, error all I/O.
		 */
		return DM_MAPIO_KILL;
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (bio_op(bio) == REQ_OP_ZONE_RESET)
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
		    all_corrupt_bio_flags_match(bio, fc)) {
			/*
			 * Corrupt successful matching READs while in down state.
			 */
			corrupt_bio_data(bio, fc);

		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
			   !test_bit(ERROR_WRITES, &fc->flags)) {
			/*
			 * Error read during the down_interval if drop_writes
			 * and error_writes were not configured.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}

static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u ", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);

		if (drop_writes)
			DMEMIT("drop_writes ");
		else if (error_writes)
			DMEMIT("error_writes ");

		if (fc->corrupt_bio_byte)
			DMEMIT("corrupt_bio_byte %u %c %u %u ",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start ||
	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int flakey_report_zones(struct dm_target *ti, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct flakey_c *fc = ti->private;
	int ret;

	/* Do report and remap it */
	ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
				  zones, nr_zones);
	if (ret != 0)
		return ret;

	if (*nr_zones)
		dm_remap_zone_report(ti, fc->start, zones, nr_zones);
	return 0;
}
#endif

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name = "flakey",
	.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_ZONED_HM,
	.report_zones = flakey_report_zones,
#endif
	.module = THIS_MODULE,
	.ctr = flakey_ctr,
	.dtr = flakey_dtr,
	.map = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};

static int __init dm_flakey_init(void)
{
	int r = dm_register_target(&flakey_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_flakey_exit(void)
{
	dm_unregister_target(&flakey_target);
}

/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");