/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2024-2025, Klara, Inc.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field.  If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
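
/*
 * Handlers are normally installed from userland via zinject(8), which
 * reaches zio_inject_fault() below through the inject-fault ioctl
 * (ZFS_IOC_INJECT_FAULT), and are listed and removed through
 * zio_inject_list_next() and zio_clear_fault().
 */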

#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;

/*
 * Describes a zinject handler registered on the system. It also
 * contains the list node that links the handler into the global
 * zinject handler list.
 */
typedef struct inject_handler {
	int			zi_id;
	spa_t			*zi_spa;
	char			*zi_spa_name; /* ZINJECT_DELAY_IMPORT only */
	zinject_record_t	zi_record;
	uint64_t		*zi_lanes;
	int			zi_next_lane;
	list_node_t		zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;
/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus this count may only be modified while holding the
 * inject_lock as RW_WRITER, and may only be read while holding it as
 * (at least) RW_READER.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(); refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
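	 * (e.g. zi_freq == 25 fires on ~25% of matches; larger values
	 * are interpreted as a fraction of ZI_PERCENTAGE_MAX, allowing
	 * finer-grained rates)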
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (random_in_range(maximum) < frequency);
}

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type, int dva,
    zinject_record_t *record, int error)
{
	boolean_t matched = B_FALSE;
	boolean_t injected = B_FALSE;

	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			matched = B_TRUE;
		goto done;
	}

	/*
	 * Check for an exact match.
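	 * zi_dvas is a bitmask of acceptable DVAs: e.g. a zi_dvas of
	 * 0x5 restricts injection to copies stored in DVAs 0 and 2,
	 * while zero matches any (or no) DVA.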
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    (record->zi_dvas == 0 ||
	    (dva != ZI_NO_DVA && (record->zi_dvas & (1ULL << dva)))) &&
	    error == record->zi_error) {
		matched = B_TRUE;
		goto done;
	}

done:
	if (matched) {
		record->zi_match_count++;
		injected = freq_triggered(record->zi_freq);
	}

	if (injected)
		record->zi_inject_count++;

	return (injected);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, const char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0) {
			handler->zi_record.zi_match_count++;
			handler->zi_record.zi_inject_count++;
			panic("Panic requested in function %s\n", tag);
		}
	}

	rw_exit(&inject_lock);
}

/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, ZI_NO_DVA,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}

/*
 * If this is a physical I/O for a vdev child, determine which DVA it
 * is for. We iterate backwards through the DVAs, matching on the
 * offset, so that we end up with ZI_NO_DVA (-1) if we don't find a
 * match.
 */
static int
zio_match_dva(zio_t *zio)
{
	int i = ZI_NO_DVA;

	if (zio->io_bp != NULL && zio->io_vd != NULL &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
			dva_t *dva = &zio->io_bp->blk_dva[i];
			uint64_t off = DVA_GET_OFFSET(dva);
			vdev_t *vd = vdev_lookup_top(zio->io_spa,
			    DVA_GET_VDEV(dva));

			/* Compensate for vdev label added to leaves */
			if (zio->io_vd->vdev_ops->vdev_op_leaf)
				off += VDEV_LABEL_START_SIZE;

			if (zio->io_vd == vd && zio->io_offset == off)
				break;
		}
	}

	return (i);
}

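/*
 * For example, a child read of the second copy of a two-DVA block
 * arrives here with an offset matching blk_dva[1], so the loop breaks
 * at i == 1 and zio_match_handler() then tests bit 1 of zi_dvas.
 */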

/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	/*
	 * A rebuild I/O has no checksum to verify.
	 */
	if (zio->io_priority == ZIO_PRIORITY_REBUILD && error == ECKSUM)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    zio_match_dva(zio), &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			handler->zi_record.zi_match_count++;
			handler->zi_record.zi_inject_count++;
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}

static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	zio_t *zio = private;
	uint8_t *buffer = data;
	uint_t byte = random_in_range(len);

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << random_in_range(8);

	return (1);	/* stop after first flip */
}

/* Test if this zio matches the iotype from the injection record. */
static boolean_t
zio_match_iotype(zio_t *zio, uint32_t iotype)
{
	ASSERT3P(zio, !=, NULL);

	/* Unknown iotype, maybe from a newer version of zinject. Reject it. */
	if (iotype >= ZINJECT_IOTYPES)
		return (B_FALSE);

	/* Probe IOs only match IOTYPE_PROBE, regardless of their type. */
	if (zio->io_flags & ZIO_FLAG_PROBE)
		return (iotype == ZINJECT_IOTYPE_PROBE);

	/* Standard IO types, match against ZIO type. */
	if (iotype < ZINJECT_IOTYPE_ALL)
		return (iotype == zio->io_type);

	/* Match any standard IO type. */
	if (iotype == ZINJECT_IOTYPE_ALL)
		return (B_TRUE);

	return (B_FALSE);
}

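/*
 * Common device-fault path, used both at open time (zio == NULL) and
 * per-I/O. err1/err2 allow a caller to test two acceptable error codes
 * in a single pass over the handler list.
 */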
static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during device open
	 * (i.e. zio == NULL) or a device flush (offset is meaningless). We let
	 * probe IOs through so we can match them to probe inject records.
	 */
	if (zio != NULL && zio->io_type != ZIO_TYPE_FLUSH &&
	    !(zio->io_flags & ZIO_FLAG_PROBE)) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL && !zio_match_iotype(zio,
			    handler->zi_record.zi_iotype))
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				handler->zi_record.zi_match_count++;

				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				handler->zi_record.zi_inject_count++;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					if (zio == NULL)
						break;

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				handler->zi_record.zi_match_count++;
				handler->zi_record.zi_inject_count++;
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

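/*
 * Single-error convenience wrapper: INT_MAX is passed for the second
 * error code, which is not a meaningful errno and so never matches.
 */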
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}

int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}

/*
 * Simulate hardware that ignores cache flushes.  For the requested
 * number of seconds, nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		handler->zi_record.zi_match_count++;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (random_in_range(100) < 60) {
			handler->zi_record.zi_inject_count++;
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
		break;
	}

	rw_exit(&inject_lock);
}

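/*
 * Verify that an "ignored writes" window has not been exceeded. The
 * zi_timer field holds the lbolt (for positive, seconds-based
 * durations) or the txg (for negative, txg-based durations) recorded
 * when zio_handle_ignored_writes() above first fired.
 */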
void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		handler->zi_record.zi_match_count++;
		handler->zi_record.zi_inject_count++;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted in each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
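	 * (With two lanes at 10ms, for example, up to two requests can
	 * complete in each 10ms interval.)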
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/* also match on I/O type (e.g., -T read) */
		if (!zio_match_iotype(zio, handler->zi_record.zi_iotype))
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen; the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		handler->zi_record.zi_match_count++;

		/* Limit the use of this handler if requested */
		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer nanoseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer nanoseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field below.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;

		min_handler->zi_record.zi_inject_count++;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}

769 
770 static void
771 zio_handle_pool_delay(spa_t *spa, hrtime_t elapsed, zinject_type_t command)
772 {
773 	inject_handler_t *handler;
774 	hrtime_t delay = 0;
775 	int id = 0;
776 
777 	rw_enter(&inject_lock, RW_READER);
778 
779 	for (handler = list_head(&inject_handlers);
780 	    handler != NULL && handler->zi_record.zi_cmd == command;
781 	    handler = list_next(&inject_handlers, handler)) {
782 		ASSERT3P(handler->zi_spa_name, !=, NULL);
783 		if (strcmp(spa_name(spa), handler->zi_spa_name) == 0) {
784 			handler->zi_record.zi_match_count++;
785 			uint64_t pause =
786 			    SEC2NSEC(handler->zi_record.zi_duration);
787 			if (pause > elapsed) {
788 				handler->zi_record.zi_inject_count++;
789 				delay = pause - elapsed;
790 			}
791 			id = handler->zi_id;
792 			break;
793 		}
794 	}
795 
796 	rw_exit(&inject_lock);
797 
798 	if (delay) {
799 		if (command == ZINJECT_DELAY_IMPORT) {
800 			spa_import_progress_set_notes(spa, "injecting %llu "
801 			    "sec delay", (u_longlong_t)NSEC2SEC(delay));
802 		}
803 		zfs_sleep_until(gethrtime() + delay);
804 	}
805 	if (id) {
806 		/* all done with this one-shot handler */
807 		zio_clear_fault(id);
808 	}
809 }
810 
811 /*
812  * For testing, inject a delay during an import
813  */
814 void
815 zio_handle_import_delay(spa_t *spa, hrtime_t elapsed)
816 {
817 	zio_handle_pool_delay(spa, elapsed, ZINJECT_DELAY_IMPORT);
818 }
819 
820 /*
821  * For testing, inject a delay during an export
822  */
823 void
824 zio_handle_export_delay(spa_t *spa, hrtime_t elapsed)
825 {
826 	zio_handle_pool_delay(spa, elapsed, ZINJECT_DELAY_EXPORT);
827 }
828 
829 static int
830 zio_calculate_range(const char *pool, zinject_record_t *record)
831 {
832 	dsl_pool_t *dp;
833 	dsl_dataset_t *ds;
834 	objset_t *os = NULL;
835 	dnode_t *dn = NULL;
836 	int error;
837 
838 	/*
839 	 * Obtain the dnode for object using pool, objset, and object
840 	 */
841 	error = dsl_pool_hold(pool, FTAG, &dp);
842 	if (error)
843 		return (error);
844 
845 	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
846 	dsl_pool_rele(dp, FTAG);
847 	if (error)
848 		return (error);
849 
850 	error = dmu_objset_from_ds(ds, &os);
851 	dsl_dataset_rele(ds, FTAG);
852 	if (error)
853 		return (error);
854 
855 	error = dnode_hold(os, record->zi_object, FTAG, &dn);
856 	if (error)
857 		return (error);
858 
859 	/*
860 	 * Translate the range into block IDs
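	 * (e.g. with 128K data blocks, dn_datablkshift == 17, a byte
	 * range of [0, 1M) becomes block IDs 0 through 7)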
	 */
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}

static boolean_t
zio_pool_handler_exists(const char *name, zinject_type_t command)
{
	boolean_t exists = B_FALSE;

	rw_enter(&inject_lock, RW_READER);
	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (command != handler->zi_record.zi_cmd)
			continue;

		const char *pool = (handler->zi_spa_name != NULL) ?
		    handler->zi_spa_name : spa_name(handler->zi_spa);
		if (strcmp(name, pool) == 0) {
			exists = B_TRUE;
			break;
		}
	}
	rw_exit(&inject_lock);

	return (exists);
}

/*
 * Create a new handler for the given record.  We add it to the list, adding
 * a reference to the spa_t in the process.  We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
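		 * With this cap the zi_lanes allocation below stays
		 * under UINT16_MAX * sizeof (uint64_t), i.e. just under
		 * 512 KiB.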
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes -- calculate the actual blkid
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * Pool delays for import or export don't take an
		 * injection reference on the spa. Instead they
		 * rely on matching by name.
		 */
		if (record->zi_cmd == ZINJECT_DELAY_IMPORT ||
		    record->zi_cmd == ZINJECT_DELAY_EXPORT) {
			if (record->zi_duration <= 0)
				return (SET_ERROR(EINVAL));
			/*
			 * Only one import | export delay handler per pool.
			 */
			if (zio_pool_handler_exists(name, record->zi_cmd))
				return (SET_ERROR(EEXIST));

			mutex_enter(&spa_namespace_lock);
			boolean_t has_spa = spa_lookup(name) != NULL;
			mutex_exit(&spa_namespace_lock);

			if (record->zi_cmd == ZINJECT_DELAY_IMPORT && has_spa)
				return (SET_ERROR(EEXIST));
			if (record->zi_cmd == ZINJECT_DELAY_EXPORT && !has_spa)
				return (SET_ERROR(ENOENT));
			spa = NULL;
		} else {
			/*
			 * spa_inject_addref() will add an injection
			 * reference, which will prevent the pool from
			 * being removed from the namespace while still
			 * allowing it to be unloaded.
			 */
			if ((spa = spa_inject_addref(name)) == NULL)
				return (SET_ERROR(ENOENT));
		}

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);
		handler->zi_spa = spa;	/* note: can be NULL */
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		if (handler->zi_spa == NULL)
			handler->zi_spa_name = spa_strdup(name);
		else
			handler->zi_spa_name = NULL;

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer.  Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}

/*
 * Returns the next record with an ID greater than that supplied to the
 * function.  Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		ASSERT(handler->zi_spa || handler->zi_spa_name);
		if (handler->zi_spa != NULL)
			(void) strlcpy(name, spa_name(handler->zi_spa), buflen);
		else
			(void) strlcpy(name, handler->zi_spa_name, buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	if (handler->zi_spa_name != NULL)
		spa_strfree(handler->zi_spa_name);

	if (handler->zi_spa != NULL)
		spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif