// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

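/*
 * A bio is only eligible for corruption if every flag bit set in
 * fc->corrupt_bio_flags is also set in the bio's bi_opf; a mask of 0
 * therefore matches every bio.
 */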
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned int up_interval;
	unsigned int down_interval;
	unsigned long flags;
	unsigned int corrupt_bio_byte;
	unsigned int corrupt_bio_rw;
	unsigned int corrupt_bio_value;
	blk_opf_t corrupt_bio_flags;
};

enum feature_flag_bits {
	ERROR_READS,
	DROP_WRITES,
	ERROR_WRITES
};

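/*
 * Per-bio state: flakey_map() records whether the bio was submitted while
 * the device was in its "down" interval, so that flakey_end_io() knows
 * whether the completed bio is a candidate for corruption or an error.
 */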
struct per_bio_data {
	bool bio_submitted;
};

static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned int argc;
	const char *arg_name;

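	/*
	 * At most 7 feature args in total: error_reads (1), drop_writes or
	 * error_writes (1), and corrupt_bio_byte with its four parameters (5).
	 * Entries 1-3 bound the individual corrupt_bio_byte parameters read
	 * below.
	 */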
	static const struct dm_arg _args[] = {
		{0, 7, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * error_reads
		 */
		if (!strcasecmp(arg_name, "error_reads")) {
			if (test_and_set_bit(ERROR_READS, &fc->flags)) {
				ti->error = "Feature error_reads duplicated";
				return -EINVAL;
			}
			continue;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
				     sizeof(unsigned int));
			r = dm_read_arg(_args + 3, as,
				(__force unsigned int *)&fc->corrupt_bio_flags,
				&ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

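	/*
	 * Default behaviour when no feature was requested: error all
	 * reads and writes during the down interval.
	 */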
	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags)) {
		set_bit(ERROR_WRITES, &fc->flags);
		set_bit(ERROR_READS, &fc->flags);
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [error_reads]
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 */
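/*
 * Illustrative example (device path and sector count are arbitrary):
 * a 200 MiB flakey device on top of /dev/sdb1 that passes I/O through
 * for 30 seconds and then drops all writes for 5 seconds:
 *
 *   dmsetup create flakey-test --table \
 *     "0 409600 flakey /dev/sdb1 0 30 5 1 drop_writes"
 */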
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio_set_dev(bio, fc->dev->bdev);
	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	struct bvec_iter iter;
	struct bio_vec bvec;

	if (!bio_has_data(bio))
		return;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
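	/*
	 * Worked example: corrupt_bio_byte 512 gives a zero-based index of
	 * 511.  If the first segment is 256 bytes long, 256 is subtracted
	 * and the byte is patched at offset 255 of the second segment,
	 * provided that segment holds at least 256 bytes.
	 */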
	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			char *segment;
			struct page *page = bio_iter_page(bio, iter);
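			/*
			 * Never scribble on the shared zero page: write
			 * bios (e.g. block-layer zeroing) may point at it,
			 * and modifying it would corrupt memory system-wide.
			 */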
			if (unlikely(page == ZERO_PAGE(0)))
				break;
			segment = bvec_kmap_local(&bvec);
			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
			kunmap_local(segment);
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)",
				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned int elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_submitted = false;

	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive ? */
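	/*
	 * Example: with up_interval=30 and down_interval=5, at elapsed=33s
	 * we get 33 % 35 = 33 >= 30, so the device is down; at elapsed=36s,
	 * 36 % 35 = 1 < 30 and I/O passes through untouched again.
	 */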
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads right away if error_reads was requested
		 * (parse_features() sets it by default when no other
		 * feature is configured).  Otherwise, flakey_end_io()
		 * will decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (test_bit(ERROR_READS, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		if (fc->corrupt_bio_byte) {
			if (fc->corrupt_bio_rw == WRITE) {
				if (all_corrupt_bio_flags_match(bio, fc))
					corrupt_bio_data(bio, fc);
			}
			goto map_bio;
		}
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc);
			}
		}
		if (test_bit(ERROR_READS, &fc->flags)) {
			/*
			 * Error the read if error_reads was configured,
			 * either explicitly or as the default when no
			 * other feature was requested.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}

static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned int error_reads, drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		error_reads = test_bit(ERROR_READS, &fc->flags);
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
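		/* Each flag is one feature arg; corrupt_bio_byte accounts for five. */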
		DMEMIT(" %u", error_reads + drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);

		if (error_reads)
			DMEMIT(" error_reads");
		if (drop_writes)
			DMEMIT(" drop_writes");
		else if (error_writes)
			DMEMIT(" error_writes");

		if (fc->corrupt_bio_byte)
			DMEMIT(" corrupt_bio_byte %u %c %u %u",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;

	return dm_report_zones(fc->dev->bdev, fc->start,
			       flakey_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define flakey_report_zones NULL
#endif

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = flakey_report_zones,
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
module_dm(flakey);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");