/*-
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
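/*
 * For example (illustrative only; the sysctl name depends on where the
 * fail point is declared), a fail point is typically driven from userland
 * via its sysctl under debug.fail_point:
 *
 *	# sysctl debug.fail_point.example='2.5%return(5)'
 *	# sysctl debug.fail_point.example
 *	debug.fail_point.example: 2.5%return(5)
 */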
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
        M_WAITOK | M_ZERO)

/**
 * These define the wchans that are used for sleeping and pausing,
 * respectively.  They are chosen arbitrarily, but each must be unique to
 * its failpoint, and the sleep and pause channels of a given failpoint
 * must be distinct from one another.
 */
#define FP_SLEEP_CHANNEL(fp) (void *)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void *, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The overwhelmingly common case is a single entry, so this limit should
 * never be hit in practice.  Bounding the entry count saves on re-mallocs
 * while holding a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings below.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an errorcode */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the cpu */
	FAIL_POINT_DELAY,	/**< busy wait the cpu */
	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};

static struct {
	const char *name;
	int	nmlen;
} fail_type_strings[] = {
#define	FP_TYPE_NM_LEN(s)	{ s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] =	FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] =	FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] =	FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] =	FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] =	FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] =	FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] =	FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] =	FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] =	FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool	fe_stale;
	enum fail_point_t	fe_type;	/**< type of entry */
	int		fe_arg;		/**< argument to type (e.g. return value) */
	int		fe_prob;	/**< likelihood of firing in millionths */
	int		fe_count;	/**< times to fire, FE_COUNT_UNTRACKED means infinite */
	pid_t		fe_pid;		/**< only fail for this process */
	struct fail_point	*fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry)	fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point *fs_parent;
	struct mtx feq_mtx; /* Gives fail_point_pause something to do.  */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
	PROB_MAX = 1000000,	/* probability between zero and this number */
	PROB_DIGITS = 6		/* number of zeroes in above number */
};
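
/*
 * For instance (illustrative): the term "2.5%" parses to units == 2 and
 * decimal == 5000 (see parse_number()), and is stored as
 * fe_prob = 2 * (PROB_MAX / 100) + 5000 = 25000, i.e. 25000 in a million.
 */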

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
        fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
        fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, then swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	        M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	        fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append an fp setting to the garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	        fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's setting with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref settings in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/*
	 * We transfer the settings to be freed to fp_ents_free_list while
	 * holding the spin mutex, then free them after we drop the lock.
	 * This avoids triggering witness due to sleepable mutexes in the
	 * memory allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		        fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			        fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			        fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	        fs_next)
		fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/**
	 * We have unpaused all threads; so we will wait no longer
	 * than the time taken for the longest remaining sleep, or
	 * the length of time of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany(msecs * hz, 1000);

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			timeout(fp->fp_post_sleep_fn, fp->fp_post_sleep_arg,
			        timo);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}
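
/*
 * Worked example (illustrative): with hz = 1000, msecs = 15 yields
 * timo = howmany(15 * 1000, 1000) = 15 ticks; with hz = 100 it rounds
 * up to howmany(15 * 100, 1000) = 2 ticks rather than truncating to 1.
 */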

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	if (name != NULL) {
		va_start(ap, fmt);
		vsnprintf(name, n + 1, fmt, ap);
		va_end(ap);
	}
	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}
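
/*
 * A minimal usage sketch (illustrative; "example" and "unit" are
 * hypothetical names):
 *
 *	struct fail_point fp;
 *
 *	fail_point_init(&fp, "example_%d", unit);
 *	...
 *	fail_point_destroy(&fp);
 */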

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be null).  If the
 * fail point tells us to panic, we never return.  Otherwise we just return
 * FAIL_POINT_RC_CONTINUE after doing some work, which means "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			execute = false;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count, count,
				    count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}
			if (!execute)
				/* We lost the race; consider the entry stale and bail now */
				continue;
			if (count == 0)
				ent->fe_stale = true;
		}

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			        fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/**
			 * Pausing is inherently strange with multiple
			 * entries given our design.  That is because some
			 * entries could be unreachable, for instance in cases like:
			 * pause->return. We can never reach the return entry.
			 * The sysctl layer actually truncates all entries after
			 * a pause for this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(-1);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}

		if (cont == 0)
			break;
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	        (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			        ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);
			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;
				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		if (TAILQ_NEXT(ent, fe_entries))
			sbuf_printf(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_printf(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
		/*
		 * Print the number of sleeping threads.  queue=0 is the
		 * argument used by msleep when sending our threads to sleep.
		 */
		sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		        cnt_sleeping);

		sbuf_printf(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
		sbuf_printf(sb, "paused_thread_count = %d\n",
		        cnt_sleeping);
	}
}
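
/*
 * Rendering example (illustrative): an entry with fe_prob == 25000,
 * fe_count == 3, fe_type == FAIL_POINT_RETURN and fe_arg == 5 is rendered
 * as "2.5%3*return(5)"; an empty setting is rendered as "off".
 */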

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/**
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we link it.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		        fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/**
	 * Prune the new entry list in place.  Get rid of useless zero
	 * probability entries and entries with hit count 0.  If 'off' is
	 * present and it has no hit count set, then all entries after it
	 * are discarded since they are unreachable.
	 */
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				        fe_entries);
				fp_free(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		        FAIL_POINT_NONSLEEPABLE)) {
			/**
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			        "non-sleepable context; using delay instead "
			        "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);
	return (error);
}

#define MAX_FAIL_POINT_BUF	1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf *sb_check;
	struct sbuf sb;
	int error;

	error = 0;
	fp = arg1;
	buf = NULL;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			error = EINVAL;
			sx_xunlock(&sx_fp_set);
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error) {
			sx_xunlock(&sx_fp_set);
			goto out;
		}
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

out:
	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf)
		fp_free(buf);

	return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::
	 *      <term> ( "->" <term> )*
	 */
	uint8_t term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		        (p = parse_term(ents, p+2)) == NULL ||
		        term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}
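
/*
 * Example inputs accepted by this grammar (illustrative):
 *
 *	"off"
 *	"2.5%return(5)"
 *	"3*sleep(100)"
 *	"1%break->print(1)->return(28)[pid 42]"
 */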

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (units <= 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}

/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/**
	 *  <number> ::
	 *      <integer> [ "." <integer> ] |
	 *      "." <integer>
	 */

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;
		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';
			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
	return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
        CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_test_fail_point, "A",
        "Trigger test fail points");
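
/*
 * The test fail point above can be exercised from userland roughly as
 * follows (illustrative):
 *
 *	# sysctl debug.fail_point.test_fail_point='return(5)'
 *	# sysctl debug.fail_point.test_trigger_fail_point
 *
 * The second command evaluates the fail point, so the sysctl read fails
 * with error 5 (EIO) instead of succeeding.
 */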
1123