/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
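/**
 * Example usage (an illustrative sketch; the fail point name
 * "my_fail_point" is hypothetical, while KFAIL_POINT_RETURN and the
 * DEBUG_FP parent are the same ones the test fail point at the bottom
 * of this file uses):
 *
 *	// In the kernel, at the spot where an error should be injectable:
 *	KFAIL_POINT_RETURN(DEBUG_FP, my_fail_point);
 *
 *	// From userland, make it return 5 on 2% of evaluations:
 *	sysctl debug.fail_point.my_fail_point='2%return(5)'
 */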
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
    M_WAITOK | M_ZERO)

/**
 * These define the wchans that are used for sleeping and pausing,
 * respectively.  They are chosen arbitrarily, but must be distinct for
 * each failpoint and between the sleep and pause cases so that wakeups
 * only reach the intended waiters.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings below.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an error code */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the CPU */
	FAIL_POINT_DELAY,	/**< busy-wait the CPU */
	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};

static struct {
	const char *name;
	int	nmlen;
} fail_type_strings[] = {
#define	FP_TYPE_NM_LEN(s)	{ s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] =	FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] =	FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] =	FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] =	FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] =	FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] =	FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] =	FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] =	FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] =	FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool	fe_stale;	/**< set once this entry can no longer fire */
	enum fail_point_t	fe_type;	/**< type of entry */
	int		fe_arg;		/**< argument to type (e.g. return value) */
	int		fe_prob;	/**< likelihood of firing in millionths */
	int32_t		fe_count;	/**< number of times to fire; FE_COUNT_UNTRACKED means infinite */
	pid_t		fe_pid;		/**< only fail for this process */
	struct fail_point	*fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry)	fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point *fs_parent;
	struct mtx feq_mtx; /* Passed to msleep_spin() when pausing. */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
	PROB_MAX = 1000000,	/* probability between zero and this number */
	PROB_DIGITS = 6		/* number of zeros in the above number */
};
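
/*
 * Worked example (illustrative): the setting string "1.5%" parses to
 * fe_prob = 1 * (PROB_MAX / 100) + 5000 = 15000, i.e. 15000 chances in
 * a million; see parse_number() for how the fraction is scaled.
 */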

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(
        struct fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to the garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref settings in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	        M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	        fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp setting to the garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	        fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's setting with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref settings in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/**
	 * We will transfer the settings to be freed to fp_ents_free_list
	 * while holding the spin mutex, then free them after we drop the
	 * lock.  This avoids triggering witness due to sleepable mutexes in
	 * the memory allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		        fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			        fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			        fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	        fs_next)
		fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/**
	 * We have unpaused all threads, so we will wait no longer
	 * than the time taken for the longest remaining sleep, or
	 * the length of time of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	if (fp->fp_callout)
		callout_drain(fp->fp_callout);
	fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany((int64_t)msecs * hz, 1000L);

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			callout_reset(fp->fp_callout, timo,
			    fp->fp_post_sleep_fn, fp->fp_post_sleep_arg);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}
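
/*
 * Worked example of the tick conversion above (illustrative): with
 * hz = 1000, sleep(10) gives timo = howmany(10 * 1000, 1000) = 10 ticks,
 * while with hz = 100, sleep(5) rounds up to 1 tick rather than
 * truncating to zero.
 */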

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	if (name != NULL) {
		va_start(ap, fmt);
		vsnprintf(name, n + 1, fmt, ap);
		va_end(ap);
	}
	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}
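
/*
 * Example (an illustrative sketch; the name format and 'unit' variable
 * are hypothetical): a dynamically created fail point might be set up
 * and torn down as follows.
 *
 *	struct fail_point fp;
 *
 *	fail_point_init(&fp, "my_subsys_%d", unit);
 *	...
 *	fail_point_destroy(&fp);
 */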

void
fail_point_alloc_callout(struct fail_point *fp)
{

	/**
	 * This assumes that calls to fail_point_use_timeout_path()
	 * will not race.
	 */
	if (fp->fp_callout != NULL)
		return;
	fp->fp_callout = fp_malloc(sizeof(*fp->fp_callout), M_WAITOK);
	callout_init(fp->fp_callout, CALLOUT_MPSAFE);
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;
	if (fp->fp_callout) {
		fp_free(fp->fp_callout);
		fp->fp_callout = NULL;
	}

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be NULL).  If
 * the fail point tells us to panic, we never return.  Otherwise we just
 * return FAIL_POINT_RC_CONTINUE after doing some work, which means "keep
 * going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}
			if (execute == false)
				/* We lost the race; consider the entry stale and bail now */
				continue;
			if (count == 0)
				ent->fe_stale = true;
		}

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			        fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/**
			 * Pausing is inherently strange with multiple
			 * entries given our design.  That is because some
			 * entries could be unreachable, for instance in cases like:
			 * pause->return. We can never reach the return entry.
			 * The sysctl layer actually truncates all entries after
			 * a pause for this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(PRI_UNCHANGED);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}

		if (cont == 0)
			break;
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}
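
/*
 * Caller-side sketch (illustrative; real call sites normally go through
 * the wrapper macros in sys/fail.h rather than calling this directly):
 *
 *	int error;
 *
 *	if (fail_point_eval_nontrivial(&some_fp, &error) ==
 *	    FAIL_POINT_RC_RETURN)
 *		return (error);
 */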

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	        (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			        ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);
			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;
				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		if (TAILQ_NEXT(ent, fe_entries))
			sbuf_printf(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_printf(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
#ifdef STACK
		/*
		 * Print the number of sleeping threads.  queue=0 is the
		 * argument used by msleep when sending our threads to sleep.
		 */
		sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		        cnt_sleeping);

#ifdef STACK
		sbuf_printf(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "paused_thread_count = %d\n",
		        cnt_sleeping);
	}
}
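
/*
 * Example rendered output (illustrative): a setting with two live
 * entries might print as "1.5%return(5)->sleep(100)", while a fail
 * point with no live entries prints as "off".
 */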

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/**
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we link it.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		        fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/**
	 * Transfer the entries we are going to keep to a new list.
	 * Get rid of useless zero-probability entries and entries with hit
	 * count 0.
	 * If 'off' is present and it has no hit count set, then all entries
	 * after it are discarded since they are unreachable.
	 */
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				        fe_entries);
				fp_free(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		        FAIL_POINT_NONSLEEPABLE)) {
			/**
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			        "non-sleepable context; using delay instead "
			        "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);
	return (error);
}

#define MAX_FAIL_POINT_BUF	1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf sb, *sb_check;
	int error;

	buf = NULL;
	error = 0;
	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			sx_xunlock(&sx_fp_set);
			error = EINVAL;
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error) {
			sx_xunlock(&sx_fp_set);
			goto out;
		}
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

out:
	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf)
		fp_free(buf);

	return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/**
	 * The set lock must be held across garbage collection; take this
	 * opportunity to reap any settings that have gone unreferenced.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::
	 *      <term> ( "->" <term> )*
	 */
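	/*
	 * For example (illustrative), "1.5%return(5)->sleep(100)" is two
	 * terms chained with "->".
	 */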
	uint8_t term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		        (p = parse_term(ents, p+2)) == NULL ||
		        term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */
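	/*
	 * For example (illustrative), "50%return(5)[pid 123]" fires with
	 * probability 50%, only for pid 123, and makes the caller return 5.
	 */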

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (!units || units < 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}

/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/**
	 *  <number> ::
	 *      <integer> [ "." <integer> ] |
	 *      "." <integer>
	 */

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;
		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';
			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}
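
/*
 * Worked example (illustrative): parsing "1.5" sets *out_units = 1 and
 * *out_decimal = 5000, since the fraction is scaled to
 * PROB_DIGITS - 2 = 4 digits.
 */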

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
	return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_test_fail_point, "A",
    "Trigger test fail points");
1148