xref: /linux/drivers/md/dm-flakey.c (revision d632ab86aff2cef21f794e337a8e7f2320ac3973)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2003 Sistina Software (UK) Limited.
4  * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include <linux/device-mapper.h>
10 
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/blkdev.h>
14 #include <linux/bio.h>
15 #include <linux/slab.h>
16 
#define DM_MSG_PREFIX "flakey"

/* Denominator for the random corruption probabilities: N out of 10^9 I/Os. */
#define PROBABILITY_BASE	1000000000

/* True iff every flag in fc->corrupt_bio_flags is also set in the bio's bi_opf. */
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
23 
/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;		/* underlying device */
	unsigned long start_time;	/* jiffies when the target was created */
	sector_t start;			/* sector offset into the underlying device */
	unsigned int up_interval;	/* seconds the device behaves normally */
	unsigned int down_interval;	/* seconds the device misbehaves */
	unsigned long flags;		/* bitmask of enum feature_flag_bits */
	unsigned int corrupt_bio_byte;	/* 1-based byte to corrupt; 0 = disabled */
	unsigned int corrupt_bio_rw;	/* READ or WRITE: direction to corrupt */
	unsigned int corrupt_bio_value;	/* value written over the chosen byte */
	blk_opf_t corrupt_bio_flags;	/* only corrupt bios with all these flags */
	unsigned int random_read_corrupt;  /* corruption chance per PROBABILITY_BASE */
	unsigned int random_write_corrupt; /* corruption chance per PROBABILITY_BASE */
};
42 
/* Bit numbers stored in flakey_c::flags; effective during the down interval. */
enum feature_flag_bits {
	ERROR_READS,	/* fail all reads */
	DROP_WRITES,	/* complete writes without issuing them */
	ERROR_WRITES	/* fail all writes */
};
48 
/* Per-bio state, used by flakey_end_io() to decide whether READ data may be corrupted. */
struct per_bio_data {
	bool bio_can_corrupt;		/* bio had data and was mapped while down */
	struct bvec_iter saved_iter;	/* bi_iter snapshot taken at map time */
};
53 
/*
 * parse_features - parse the optional feature arguments of a flakey table.
 * @as: argument set positioned at the feature-argument count.
 * @fc: context under construction; feature fields are filled in here.
 * @ti: target, used to report errors via ti->error.
 *
 * Returns 0 on success or a negative errno with ti->error set.  If no
 * effective feature is supplied, falls back to erroring all I/O while down.
 */
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r = 0;
	unsigned int argc = 0;
	const char *arg_name;

	/* Validation limits and error strings used by dm_read_arg() below. */
	static const struct dm_arg _args[] = {
		{0, 11, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
		{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
	};

	if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
		return r;

	/* No feature arguments supplied. */
	if (!argc)
		goto error_all_io;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * error_reads
		 */
		if (!strcasecmp(arg_name, "error_reads")) {
			if (test_and_set_bit(ERROR_READS, &fc->flags)) {
				ti->error = "Feature error_reads duplicated";
				return -EINVAL;
			}
			continue;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (fc->corrupt_bio_byte) {
				ti->error = "Feature corrupt_bio_byte duplicated";
				return -EINVAL;
			} else if (argc < 4) {
				ti->error = "Feature corrupt_bio_byte requires 4 parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
				     sizeof(unsigned int));
			r = dm_read_arg(_args + 3, as,
				(__force unsigned int *)&fc->corrupt_bio_flags,
				&ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_read_corrupt")) {
			if (fc->random_read_corrupt) {
				ti->error = "Feature random_read_corrupt duplicated";
				return -EINVAL;
			} else if (!argc) {
				ti->error = "Feature random_read_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_read_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_write_corrupt")) {
			if (fc->random_write_corrupt) {
				ti->error = "Feature random_write_corrupt duplicated";
				return -EINVAL;
			} else if (!argc) {
				ti->error = "Feature random_write_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_write_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	/* Reject combinations under which a corruption feature could never fire. */
	if (test_bit(DROP_WRITES, &fc->flags) &&
	    ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
	     fc->random_write_corrupt)) {
		ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) &&
		   ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
		    fc->random_write_corrupt)) {
		ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	} else if (test_bit(ERROR_READS, &fc->flags) &&
		   ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ) ||
		    fc->random_read_corrupt)) {
		ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
		return -EINVAL;
	}

	/* No effective feature selected: default to erroring all I/O while down. */
	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
	    !fc->random_read_corrupt && !fc->random_write_corrupt) {
error_all_io:
		set_bit(ERROR_WRITES, &fc->flags);
		set_bit(ERROR_READS, &fc->flags);
	}

	return 0;
}
245 
/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [error_reads]
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *     [random_read_corrupt <probability>]
 *     [random_write_corrupt <probability>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 *   Probability is expressed in units of 1/PROBABILITY_BASE (one billionth).
 */
/*
 * flakey_ctr - construct a flakey mapping from a table line.
 *
 * Parses <dev_path> <offset> <up interval> <down interval> and any feature
 * arguments, then acquires the backing device.  Returns 0 on success or a
 * negative errno with ti->error set; all partial state is freed on failure.
 */
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	/* Need at least <dev_path> <offset> <up interval> <down interval>. */
	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;	/* the up/down cycle is measured from now */

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	/* The %c probe rejects trailing garbage after the sector number. */
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	/* flakey_map() divides by the cycle length, so it must be non-zero. */
	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	/* Acquire the device last so the error path only has to free fc. */
	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}
336 
flakey_dtr(struct dm_target * ti)337 static void flakey_dtr(struct dm_target *ti)
338 {
339 	struct flakey_c *fc = ti->private;
340 
341 	dm_put_device(ti, fc->dev);
342 	kfree(fc);
343 }
344 
/* Translate a target-relative sector into a sector on the backing device. */
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *flakey = ti->private;

	return flakey->start + dm_target_offset(ti, bi_sector);
}
351 
flakey_map_bio(struct dm_target * ti,struct bio * bio)352 static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
353 {
354 	struct flakey_c *fc = ti->private;
355 
356 	bio_set_dev(bio, fc->dev->bdev);
357 	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
358 }
359 
/*
 * corrupt_bio_common - overwrite a single byte of a bio's data.
 * @bio: bio whose data is modified.
 * @corrupt_bio_byte: 0-based byte offset from the position given by @start.
 * @corrupt_bio_value: value written over the chosen byte.
 * @start: iterator marking where in the bio to start counting.
 *
 * If the offset lies beyond the bio's data, nothing is written.
 */
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
			       unsigned char corrupt_bio_value,
			       struct bvec_iter start)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	__bio_for_each_segment(bvec, bio, iter, start) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			/* Offset falls inside this segment: patch the byte. */
			unsigned char *segment = bvec_kmap_local(&bvec);
			segment[corrupt_bio_byte] = corrupt_bio_value;
			kunmap_local(segment);
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, corrupt_bio_value, corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)start.bi_sector,
				start.bi_size);
			break;
		}
		/* Not in this segment: reduce the offset and keep walking. */
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}
387 
corrupt_bio_data(struct bio * bio,struct flakey_c * fc,struct bvec_iter start)388 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
389 			     struct bvec_iter start)
390 {
391 	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
392 
393 	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
394 }
395 
/* Overwrite one randomly chosen byte of the bio's data with a random value. */
static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
	unsigned char rnd_value = get_random_u8();
	unsigned int rnd_offset = get_random_u32() % start.bi_size;

	corrupt_bio_common(bio, rnd_offset, rnd_value, start);
}
406 
clone_free(struct bio * clone)407 static void clone_free(struct bio *clone)
408 {
409 	struct folio_iter fi;
410 
411 	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
412 		bio_for_each_folio_all(fi, clone)
413 			folio_put(fi.folio);
414 	}
415 
416 	bio_uninit(clone);
417 	kfree(clone);
418 }
419 
clone_endio(struct bio * clone)420 static void clone_endio(struct bio *clone)
421 {
422 	struct bio *bio = clone->bi_private;
423 	bio->bi_status = clone->bi_status;
424 	clone_free(clone);
425 	bio_endio(bio);
426 }
427 
/*
 * clone_bio - make a private copy of a write bio's data so it can be
 * corrupted without touching the caller's pages.
 *
 * Bios larger than UIO_MAXIOV pages are first trimmed via
 * dm_accept_partial_bio().  The clone's completion (clone_endio) ends the
 * original bio.  Returns NULL on allocation failure; the caller then falls
 * back to remapping the original bio uncorrupted.
 */
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
{
	struct bio *clone;
	unsigned size, remaining_size, nr_iovecs, order;
	struct bvec_iter iter = bio->bi_iter;

	if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
		dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);

	size = bio->bi_iter.bi_size;
	nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	clone = bio_kmalloc(nr_iovecs, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	if (!clone)
		return NULL;

	bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);

	clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
	clone->bi_private = bio;
	clone->bi_end_io = clone_endio;

	remaining_size = size;

	/* Prefer large allocations, dropping the order whenever one fails. */
	order = MAX_PAGE_ORDER;
	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add, to_copy;
		unsigned char *virt;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		order = min(order, remaining_order);

retry_alloc_pages:
		pages = alloc_pages(GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, order);
		if (unlikely(!pages)) {
			if (order) {
				order--;
				goto retry_alloc_pages;
			}
			/* Even an order-0 page failed: free partial clone and give up. */
			clone_free(clone);
			return NULL;
		}
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);

		/* Copy the source data, segment by segment, into this chunk. */
		virt = page_to_virt(pages);
		to_copy = size_to_add;
		do {
			struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
			unsigned this_step = min(bvec.bv_len, to_copy);
			void *map = bvec_kmap_local(&bvec);
			memcpy(virt, map, this_step);
			kunmap_local(map);

			bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
			to_copy -= this_step;
			virt += this_step;
		} while (to_copy);

		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	return clone;
}
492 
/*
 * flakey_map - map an incoming bio, misbehaving during the down interval.
 *
 * During the up interval bios are simply remapped to the backing device.
 * During the down interval, reads may be errored, writes dropped or errored,
 * or write data corrupted, according to the configured features.
 */
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned int elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_can_corrupt = false;

	/* Zone management commands pass through untouched. */
	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive ? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		bool corrupt_fixed, corrupt_random;

		if (bio_has_data(bio)) {
			pb->bio_can_corrupt = true;
			/* Snapshot the iterator so end_io can corrupt READ data. */
			pb->saved_iter = bio->bi_iter;
		}

		/*
		 * If ERROR_READS isn't set flakey_end_io() will decide if the
		 * reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (test_bit(ERROR_READS, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		if (!pb->bio_can_corrupt)
			goto map_bio;
		/*
		 * Corrupt matching writes.
		 */
		corrupt_fixed = false;
		corrupt_random = false;
		if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_fixed = true;
		}
		if (fc->random_write_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_write_corrupt)
				corrupt_random = true;
		}
		if (corrupt_fixed || corrupt_random) {
			/* Corrupt a private copy, never the caller's pages. */
			struct bio *clone = clone_bio(ti, fc, bio);
			if (clone) {
				if (corrupt_fixed)
					corrupt_bio_data(clone, fc,
							 clone->bi_iter);
				if (corrupt_random)
					corrupt_bio_random(clone,
							   clone->bi_iter);
				submit_bio(clone);
				return DM_MAPIO_SUBMITTED;
			}
			/* Clone allocation failed: fall through, write uncorrupted. */
		}
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}
572 
/*
 * flakey_end_io - optionally corrupt the data of a successful READ that was
 * mapped while the device was in the down interval (pb->bio_can_corrupt).
 */
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc, pb->saved_iter);
			}
		}
		if (fc->random_read_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);
			if (rem < fc->random_read_corrupt)
				corrupt_bio_random(bio, pb->saved_iter);
		}
	}

	return DM_ENDIO_DONE;
}
602 
/*
 * flakey_status - emit the target's status.
 *
 * STATUSTYPE_TABLE reconstructs the constructor arguments, including the
 * computed feature-argument count; INFO and IMA report an empty string.
 */
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned int error_reads, drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		error_reads = test_bit(ERROR_READS, &fc->flags);
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		/* Each feature contributes its own argument count (see ctr comment). */
		DMEMIT(" %u", error_reads + drop_writes + error_writes +
			(fc->corrupt_bio_byte > 0) * 5 +
			(fc->random_read_corrupt > 0) * 2 +
			(fc->random_write_corrupt > 0) * 2);

		if (error_reads)
			DMEMIT(" error_reads");
		if (drop_writes)
			DMEMIT(" drop_writes");
		else if (error_writes)
			DMEMIT(" error_writes");

		if (fc->corrupt_bio_byte)
			DMEMIT(" corrupt_bio_byte %u %c %u %u",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		if (fc->random_read_corrupt > 0)
			DMEMIT(" random_read_corrupt %u", fc->random_read_corrupt);
		if (fc->random_write_corrupt > 0)
			DMEMIT(" random_write_corrupt %u", fc->random_write_corrupt);

		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}
653 
/*
 * Hand the backing bdev to the caller for ioctl forwarding.  Returns 0 when
 * the target maps the whole device exactly, 1 otherwise.
 */
static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
				unsigned int cmd, unsigned long arg,
				bool *forward)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!fc->start && ti->len == bdev_nr_sectors(*bdev))
		return 0;
	return 1;
}
669 
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * flakey_report_zones - forward a zone report to the backing device,
 * remapping the starting sector by the target offset.
 */
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;

	return dm_report_zones(fc->dev->bdev, fc->start,
			       flakey_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define flakey_report_zones NULL
#endif
683 
/* Report the single backing device span to the device-mapper core. */
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *flakey = ti->private;

	return fn(ti, flakey->dev, flakey->start, ti->len, data);
}
690 
/* Target registration: zoned-capable and passes inline crypto contexts through. */
static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = flakey_report_zones,
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
module_dm(flakey);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
710