kern_fail.c (98e0ffaefb0f241cda3a72395d3be04192ae0d47) | kern_fail.c (70e20d4e1a6b23cb7bd8b869d1ff782c590f0fa8) |
---|---|
1/*- 2 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/ 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 43 unchanged lines hidden (view full) --- 52#include <sys/cdefs.h> 53__FBSDID("$FreeBSD$"); 54 55#include <sys/ctype.h> 56#include <sys/errno.h> 57#include <sys/fail.h> 58#include <sys/kernel.h> 59#include <sys/libkern.h> | 1/*- 2 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/ 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 43 unchanged lines hidden (view full) --- 52#include <sys/cdefs.h> 53__FBSDID("$FreeBSD$"); 54 55#include <sys/ctype.h> 56#include <sys/errno.h> 57#include <sys/fail.h> 58#include <sys/kernel.h> 59#include <sys/libkern.h> |
60#include <sys/limits.h> |
|
60#include <sys/lock.h> 61#include <sys/malloc.h> 62#include <sys/mutex.h> 63#include <sys/proc.h> 64#include <sys/sbuf.h> | 61#include <sys/lock.h> 62#include <sys/malloc.h> 63#include <sys/mutex.h> 64#include <sys/proc.h> 65#include <sys/sbuf.h> |
66#include <sys/sleepqueue.h> 67#include <sys/sx.h> 68#include <sys/sysctl.h> 69#include <sys/types.h> |
|
65 | 70 |
71#include <machine/atomic.h> |
|
66#include <machine/stdarg.h> 67 68#ifdef ILOG_DEFINE_FOR_FILE 69ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point); 70#endif 71 72static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system"); 73#define fp_free(ptr) free(ptr, M_FAIL_POINT) 74#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags)) | 72#include <machine/stdarg.h> 73 74#ifdef ILOG_DEFINE_FOR_FILE 75ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point); 76#endif 77 78static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system"); 79#define fp_free(ptr) free(ptr, M_FAIL_POINT) 80#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags)) |
81#define fs_free(ptr) fp_free(ptr) 82#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \ 83 M_WAITOK | M_ZERO) |
|
75 | 84 |
76static struct mtx g_fp_mtx; 77MTX_SYSINIT(g_fp_mtx, &g_fp_mtx, "fail point mtx", MTX_DEF); 78#define FP_LOCK() mtx_lock(&g_fp_mtx) 79#define FP_UNLOCK() mtx_unlock(&g_fp_mtx) | 85 /** 86 * These define the wchans used for sleeping and pausing, respectively. 87 * They are chosen arbitrarily, but each must be unique to its fail point 88 * and distinct between sleep and pause so the two can be woken independently. 89 */ 90#define FP_SLEEP_CHANNEL(fp) (void*)(fp) 91#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting) |
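The two wchan macros above exist so that wakeup(9) can target timed sleepers and paused threads independently; fail_point_drain() later in this file relies on this. A minimal sketch of the idea (kernel context assumed):

    /*
     * Wake only the threads parked by FAIL_POINT_PAUSE on this fail
     * point; threads in a timed FAIL_POINT_SLEEP wait on
     * FP_SLEEP_CHANNEL(fp) and are left undisturbed.
     */
    wakeup(FP_PAUSE_CHANNEL(fp));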
80 81/** | 92 93/** |
94 * Don't allow more than this many entries in a fail point set by sysctl. 95 * The 99.99...% case is to have 1 entry. I can't imagine having this many 96 * entries, so it should not limit us. Saves on re-mallocs while holding 97 * a non-sleepable lock. 98 */ 99#define FP_MAX_ENTRY_COUNT 20 100 101/* Used to drain sbufs to the sysctl output */ 102int fail_sysctl_drain_func(void *, const char *, int); 103 104/* Head of tailq of struct fail_point_entry */ 105TAILQ_HEAD(fail_point_entry_queue, fail_point_entry); 106 107/** 108 * fp entries garbage list; outstanding entries are cleaned up in the 109 * garbage collector 110 */ 111STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting); 112static struct fail_point_setting_garbage fp_setting_garbage = 113 STAILQ_HEAD_INITIALIZER(fp_setting_garbage); 114static struct mtx mtx_garbage_list; 115MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx", 116 MTX_SPIN); 117 118static struct sx sx_fp_set; 119SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx"); 120 121/** |
|
82 * Failpoint types. 83 * Don't change these without changing fail_type_strings in fail.c. 84 * @ingroup failpoint_private 85 */ 86enum fail_point_t { 87 FAIL_POINT_OFF, /**< don't fail */ 88 FAIL_POINT_PANIC, /**< panic */ 89 FAIL_POINT_RETURN, /**< return an errorcode */ 90 FAIL_POINT_BREAK, /**< break into the debugger */ 91 FAIL_POINT_PRINT, /**< print a message */ 92 FAIL_POINT_SLEEP, /**< sleep for some msecs */ | 122 * Failpoint types. 123 * Don't change these without changing fail_type_strings in fail.c. 124 * @ingroup failpoint_private 125 */ 126enum fail_point_t { 127 FAIL_POINT_OFF, /**< don't fail */ 128 FAIL_POINT_PANIC, /**< panic */ 129 FAIL_POINT_RETURN, /**< return an errorcode */ 130 FAIL_POINT_BREAK, /**< break into the debugger */ 131 FAIL_POINT_PRINT, /**< print a message */ 132 FAIL_POINT_SLEEP, /**< sleep for some msecs */ |
93 FAIL_POINT_NUMTYPES | 133 FAIL_POINT_PAUSE, /**< sleep until failpoint is set to off */ 134 FAIL_POINT_YIELD, /**< yield the cpu */ 135 FAIL_POINT_DELAY, /**< busy wait the cpu */ 136 FAIL_POINT_NUMTYPES, 137 FAIL_POINT_INVALID = -1 |
94}; 95 96static struct { 97 const char *name; 98 int nmlen; 99} fail_type_strings[] = { 100#define FP_TYPE_NM_LEN(s) { s, sizeof(s) - 1 } 101 [FAIL_POINT_OFF] = FP_TYPE_NM_LEN("off"), 102 [FAIL_POINT_PANIC] = FP_TYPE_NM_LEN("panic"), 103 [FAIL_POINT_RETURN] = FP_TYPE_NM_LEN("return"), 104 [FAIL_POINT_BREAK] = FP_TYPE_NM_LEN("break"), 105 [FAIL_POINT_PRINT] = FP_TYPE_NM_LEN("print"), 106 [FAIL_POINT_SLEEP] = FP_TYPE_NM_LEN("sleep"), | 138}; 139 140static struct { 141 const char *name; 142 int nmlen; 143} fail_type_strings[] = { 144#define FP_TYPE_NM_LEN(s) { s, sizeof(s) - 1 } 145 [FAIL_POINT_OFF] = FP_TYPE_NM_LEN("off"), 146 [FAIL_POINT_PANIC] = FP_TYPE_NM_LEN("panic"), 147 [FAIL_POINT_RETURN] = FP_TYPE_NM_LEN("return"), 148 [FAIL_POINT_BREAK] = FP_TYPE_NM_LEN("break"), 149 [FAIL_POINT_PRINT] = FP_TYPE_NM_LEN("print"), 150 [FAIL_POINT_SLEEP] = FP_TYPE_NM_LEN("sleep"), |
151 [FAIL_POINT_PAUSE] = FP_TYPE_NM_LEN("pause"), 152 [FAIL_POINT_YIELD] = FP_TYPE_NM_LEN("yield"), 153 [FAIL_POINT_DELAY] = FP_TYPE_NM_LEN("delay"), |
|
107}; 108 | 154}; 155 |
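These type names are the terminals of the human-readable grammar parsed by parse_fail_point() below. A few hedged examples of complete settings built from them:

    off              /* disable the fail point */
    5%return(5)      /* return 5 on ~5% of evaluations */
    3*sleep(100)     /* sleep 100 msecs on each of the next 3 hits */
    delay(50)        /* busy-wait 50 usecs on every evaluation */
    pause            /* sleep until the fail point is set to off */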
156#define FE_COUNT_UNTRACKED (INT_MIN) 157 |
|
109/** 110 * Internal structure tracking a single term of a complete failpoint. 111 * @ingroup failpoint_private 112 */ 113struct fail_point_entry { | 158/** 159 * Internal structure tracking a single term of a complete failpoint. 160 * @ingroup failpoint_private 161 */ 162struct fail_point_entry { |
114 enum fail_point_t fe_type; /**< type of entry */ | 163 volatile bool fe_stale; 164 enum fail_point_t fe_type; /**< type of entry */ |
115 int fe_arg; /**< argument to type (e.g. return value) */ 116 int fe_prob; /**< likelihood of firing in millionths */ | 165 int fe_arg; /**< argument to type (e.g. return value) */ 166 int fe_prob; /**< likelihood of firing in millionths */ |
117 int fe_count; /**< number of times to fire, 0 means always */ | 167 int fe_count; /**< times to fire, FE_COUNT_UNTRACKED means always */ |
118 pid_t fe_pid; /**< only fail for this process */ | 168 pid_t fe_pid; /**< only fail for this process */ |
119 TAILQ_ENTRY(fail_point_entry) fe_entries; /**< next entry in fail point */ | 169 struct fail_point *fe_parent; /**< backpointer to fp */ 170 TAILQ_ENTRY(fail_point_entry) fe_entries; /**< next entry ptr */ |
120}; 121 | 171}; 172 |
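As a worked example (values hypothetical; the parser appears later in this file), the term "2.5%3*return(7)[pid 42]" would populate an entry in the new layout roughly as follows:

    struct fail_point_entry ent = {
            .fe_type  = FAIL_POINT_RETURN,
            .fe_arg   = 7,          /* handed back via *return_value */
            .fe_prob  = 25000,      /* 2.5% of PROB_MAX (1000000) */
            .fe_count = 3,          /* fire three times, then go stale */
            .fe_pid   = 42,         /* only fail for this pid */
    };

The remaining fields (fe_stale, fe_parent, the queue linkage) are filled in by fail_point_entry_new() below.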
173struct fail_point_setting { 174 STAILQ_ENTRY(fail_point_setting) fs_garbage_link; 175 struct fail_point_entry_queue fp_entry_queue; 176 struct fail_point * fs_parent; 177 struct mtx feq_mtx; /* Gives fail_point_pause something to do. */ 178}; 179 180/** 181 * Defines stating the equivalent of probability one (100%) 182 */ 183enum { 184 PROB_MAX = 1000000, /* probability between zero and this number */ 185 PROB_DIGITS = 6 /* number of zeros in above number */ 186}; 187 188/* Get a ref on an fp's fp_setting */ 189static inline struct fail_point_setting *fail_point_setting_get_ref( 190 struct fail_point *fp); 191/* Release a ref on an fp_setting */ 192static inline void fail_point_setting_release_ref(struct fail_point *fp); 193/* Allocate and initialize a struct fail_point_setting */ 194static struct fail_point_setting *fail_point_setting_new(struct 195 fail_point *); 196/* Free a struct fail_point_setting */ 197static void fail_point_setting_destroy(struct fail_point_setting *fp_setting); 198/* Allocate and initialize a struct fail_point_entry */ 199static struct fail_point_entry *fail_point_entry_new(struct 200 fail_point_setting *); 201/* Free a struct fail_point_entry */ 202static void fail_point_entry_destroy(struct fail_point_entry *fp_entry); 203/* Append fp setting to garbage list */ 204static inline void fail_point_setting_garbage_append( 205 struct fail_point_setting *fp_setting); 206/* Swap fp's setting with fp_setting_new */ 207static inline struct fail_point_setting * 208 fail_point_swap_settings(struct fail_point *fp, 209 struct fail_point_setting *fp_setting_new); 210/* Free up any zero-ref setting in the garbage queue */ 211static void fail_point_garbage_collect(void); 212/* If this fail point's settings are empty, then swap it out to NULL.
*/ 213static inline void fail_point_eval_swap_out(struct fail_point *fp, 214 struct fail_point_setting *fp_setting); 215 216bool 217fail_point_is_off(struct fail_point *fp) 218{ 219 bool return_val; 220 struct fail_point_setting *fp_setting; 221 struct fail_point_entry *ent; 222 223 return_val = true; 224 225 fp_setting = fail_point_setting_get_ref(fp); 226 if (fp_setting != NULL) { 227 TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, 228 fe_entries) { 229 if (!ent->fe_stale) { 230 return_val = false; 231 break; 232 } 233 } 234 } 235 fail_point_setting_release_ref(fp); 236 237 return (return_val); 238} 239 240/* Allocate and initialize a struct fail_point_setting */ 241static struct fail_point_setting * 242fail_point_setting_new(struct fail_point *fp) 243{ 244 struct fail_point_setting *fs_new; 245 246 fs_new = fs_malloc(); 247 fs_new->fs_parent = fp; 248 TAILQ_INIT(&fs_new->fp_entry_queue); 249 mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN); 250 251 fail_point_setting_garbage_append(fs_new); 252 253 return (fs_new); 254} 255 256/* Free a struct fail_point_setting */ 257static void 258fail_point_setting_destroy(struct fail_point_setting *fp_setting) 259{ 260 struct fail_point_entry *ent; 261 262 while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) { 263 ent = TAILQ_FIRST(&fp_setting->fp_entry_queue); 264 TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries); 265 fail_point_entry_destroy(ent); 266 } 267 268 fs_free(fp_setting); 269} 270 271/* Allocate and initialize a struct fail_point_entry */ 272static struct fail_point_entry * 273fail_point_entry_new(struct fail_point_setting *fp_setting) 274{ 275 struct fail_point_entry *fp_entry; 276 277 fp_entry = fp_malloc(sizeof(struct fail_point_entry), 278 M_WAITOK | M_ZERO); 279 fp_entry->fe_parent = fp_setting->fs_parent; 280 fp_entry->fe_prob = PROB_MAX; 281 fp_entry->fe_pid = NO_PID; 282 fp_entry->fe_count = FE_COUNT_UNTRACKED; 283 TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry, 284 fe_entries); 285 286 return (fp_entry); 287} 288 289/* Free a struct fail_point_entry */ 290static void 291fail_point_entry_destroy(struct fail_point_entry *fp_entry) 292{ 293 294 fp_free(fp_entry); 295} 296 297/* Get a ref on an fp's fp_setting */ 298static inline struct fail_point_setting * 299fail_point_setting_get_ref(struct fail_point *fp) 300{ 301 struct fail_point_setting *fp_setting; 302 303 /* Invariant: if we have a ref, our pointer to fp_setting is safe */ 304 atomic_add_acq_32(&fp->fp_ref_cnt, 1); 305 fp_setting = fp->fp_setting; 306 307 return (fp_setting); 308} 309 310/* Release a ref on an fp_setting */ |
|
122static inline void | 311static inline void |
123fail_point_sleep(struct fail_point *fp, struct fail_point_entry *ent, 124 int msecs, enum fail_point_return_code *pret) | 312fail_point_setting_release_ref(struct fail_point *fp) |
125{ | 313{ |
126 /* convert from millisecs to ticks, rounding up */ 127 int timo = ((msecs * hz) + 999) / 1000; | |
128 | 314 |
315 KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs")); 316 atomic_subtract_rel_32(&fp->fp_ref_cnt, 1); 317} 318 319/* Append fp entries to fp garbage list */ 320static inline void 321fail_point_setting_garbage_append(struct fail_point_setting *fp_setting) 322{ 323 324 mtx_lock_spin(&mtx_garbage_list); 325 STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting, 326 fs_garbage_link); 327 mtx_unlock_spin(&mtx_garbage_list); 328} 329 330/* Swap fp's entries with fp_setting_new */ 331static struct fail_point_setting * 332fail_point_swap_settings(struct fail_point *fp, 333 struct fail_point_setting *fp_setting_new) 334{ 335 struct fail_point_setting *fp_setting_old; 336 337 fp_setting_old = fp->fp_setting; 338 fp->fp_setting = fp_setting_new; 339 340 return (fp_setting_old); 341} 342 343static inline void 344fail_point_eval_swap_out(struct fail_point *fp, 345 struct fail_point_setting *fp_setting) 346{ 347 348 /* We may have already been swapped out and replaced; ignore. */ 349 if (fp->fp_setting == fp_setting) 350 fail_point_swap_settings(fp, NULL); 351} 352 353/* Free up any zero-ref entries in the garbage queue */ 354static void 355fail_point_garbage_collect(void) 356{ 357 struct fail_point_setting *fs_current, *fs_next; 358 struct fail_point_setting_garbage fp_ents_free_list; 359 360 /** 361 * We will transfer the entries to free to fp_ents_free_list while holding 362 * the spin mutex, then free it after we drop the lock. This avoids 363 * triggering witness due to sleepable mutexes in the memory 364 * allocator. 365 */ 366 STAILQ_INIT(&fp_ents_free_list); 367 368 mtx_lock_spin(&mtx_garbage_list); 369 STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link, 370 fs_next) { 371 if (fs_current->fs_parent->fp_setting != fs_current && 372 fs_current->fs_parent->fp_ref_cnt == 0) { 373 STAILQ_REMOVE(&fp_setting_garbage, fs_current, 374 fail_point_setting, fs_garbage_link); 375 STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current, 376 fs_garbage_link); 377 } 378 } 379 mtx_unlock_spin(&mtx_garbage_list); 380 381 STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link, 382 fs_next) 383 fail_point_setting_destroy(fs_current); 384} 385 386/* Drain out all refs from this fail point */ 387static inline void 388fail_point_drain(struct fail_point *fp, int expected_ref) 389{ 390 struct fail_point_setting *entries; 391 392 entries = fail_point_swap_settings(fp, NULL); 393 /** 394 * We have unpaused all threads; so we will wait no longer 395 * than the time taken for the longest remaining sleep, or 396 * the length of time of a long-running code block. 397 */ 398 while (fp->fp_ref_cnt > expected_ref) { 399 wakeup(FP_PAUSE_CHANNEL(fp)); 400 tsleep(&fp, PWAIT, "fail_point_drain", hz / 100); 401 } 402 fail_point_swap_settings(fp, entries); 403} 404 405static inline void 406fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret, 407 struct mtx *mtx_sleep) 408{ 409 410 if (fp->fp_pre_sleep_fn) 411 fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg); 412 413 msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0); 414 415 if (fp->fp_post_sleep_fn) 416 fp->fp_post_sleep_fn(fp->fp_post_sleep_arg); 417} 418 419static inline void 420fail_point_sleep(struct fail_point *fp, int msecs, 421 enum fail_point_return_code *pret) 422{ 423 int timo; 424 425 /* Convert from millisecs to ticks, rounding up */ 426 timo = howmany(msecs * hz, 1000); 427 |
|
129 if (timo > 0) { | 428 if (timo > 0) { |
130 if (fp->fp_sleep_fn == NULL) { 131 msleep(fp, &g_fp_mtx, PWAIT, "failpt", timo); | 429 if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) { 430 if (fp->fp_pre_sleep_fn) 431 fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg); 432 433 tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo); 434 435 if (fp->fp_post_sleep_fn) 436 fp->fp_post_sleep_fn(fp->fp_post_sleep_arg); |
132 } else { | 437 } else { |
133 timeout(fp->fp_sleep_fn, fp->fp_sleep_arg, timo); | 438 if (fp->fp_pre_sleep_fn) 439 fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg); 440 441 timeout(fp->fp_post_sleep_fn, fp->fp_post_sleep_arg, 442 timo); |
134 *pret = FAIL_POINT_RC_QUEUED; 135 } 136 } 137} 138 | 443 *pret = FAIL_POINT_RC_QUEUED; 444 } 445 } 446} 447 |
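The new conversion uses howmany(), kernel shorthand for ceiling division; it computes exactly the same result as the old ((msecs * hz) + 999) / 1000 but states the intent directly. A self-contained userland check of the arithmetic (hz = 1000 is an assumption for illustration):

    #include <stdio.h>

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))  /* as in <sys/param.h> */

    int
    main(void)
    {
            int hz = 1000;                          /* illustrative tick rate */

            printf("%d\n", howmany(1 * hz, 1000));  /* 1: 1 msec -> 1 tick */
            printf("%d\n", howmany(1, 1000));       /* 1: sub-tick rounds up */
            printf("%d\n", howmany(1500, 1000));    /* 2 */
            return (0);
    }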
139 140/** 141 * Defines stating the equivalent of probability one (100%) 142 */ 143enum { 144 PROB_MAX = 1000000, /* probability between zero and this number */ 145 PROB_DIGITS = 6, /* number of zeros in above number */ 146}; 147 148static char *parse_fail_point(struct fail_point_entries *, char *); 149static char *parse_term(struct fail_point_entries *, char *); | 448static char *parse_fail_point(struct fail_point_setting *, char *); 449static char *parse_term(struct fail_point_setting *, char *); |
150static char *parse_number(int *out_units, int *out_decimal, char *); 151static char *parse_type(struct fail_point_entry *, char *); | 450static char *parse_number(int *out_units, int *out_decimal, char *); 451static char *parse_type(struct fail_point_entry *, char *); |
152static void free_entry(struct fail_point_entries *, struct fail_point_entry *); 153static void clear_entries(struct fail_point_entries *); | |
154 155/** 156 * Initialize a fail_point. The name is formed in a printf-like fashion 157 * from "fmt" and subsequent arguments. This function is generally used 158 * for custom failpoints located at odd places in the sysctl tree, and is 159 * not explicitly needed for standard in-line-declared failpoints. 160 * 161 * @ingroup failpoint 162 */ 163void 164fail_point_init(struct fail_point *fp, const char *fmt, ...) 165{ 166 va_list ap; 167 char *name; 168 int n; 169 | 452 453/** 454 * Initialize a fail_point. The name is formed in a printf-like fashion 455 * from "fmt" and subsequent arguments. This function is generally used 456 * for custom failpoints located at odd places in the sysctl tree, and is 457 * not explicitly needed for standard in-line-declared failpoints. 458 * 459 * @ingroup failpoint 460 */ 461void 462fail_point_init(struct fail_point *fp, const char *fmt, ...) 463{ 464 va_list ap; 465 char *name; 466 int n; 467 |
170 TAILQ_INIT(&fp->fp_entries); | 468 fp->fp_setting = NULL; |
171 fp->fp_flags = 0; 172 173 /* Figure out the size of the name. */ 174 va_start(ap, fmt); 175 n = vsnprintf(NULL, 0, fmt, ap); 176 va_end(ap); 177 178 /* Allocate the name and fill it in. */ 179 name = fp_malloc(n + 1, M_WAITOK); 180 if (name != NULL) { 181 va_start(ap, fmt); 182 vsnprintf(name, n + 1, fmt, ap); 183 va_end(ap); 184 } 185 fp->fp_name = name; 186 fp->fp_location = ""; 187 fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME; | 469 fp->fp_flags = 0; 470 471 /* Figure out the size of the name. */ 472 va_start(ap, fmt); 473 n = vsnprintf(NULL, 0, fmt, ap); 474 va_end(ap); 475 476 /* Allocate the name and fill it in. */ 477 name = fp_malloc(n + 1, M_WAITOK); 478 if (name != NULL) { 479 va_start(ap, fmt); 480 vsnprintf(name, n + 1, fmt, ap); 481 va_end(ap); 482 } 483 fp->fp_name = name; 484 fp->fp_location = ""; 485 fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME; |
188 fp->fp_sleep_fn = NULL; 189 fp->fp_sleep_arg = NULL; | 486 fp->fp_pre_sleep_fn = NULL; 487 fp->fp_pre_sleep_arg = NULL; 488 fp->fp_post_sleep_fn = NULL; 489 fp->fp_post_sleep_arg = NULL; |
190} 191 192/** | 490} 491 492/** |
193 * Free the resources held by a fail_point. 194 * | 493 * Free the resources held by a fail_point, and wake any paused threads. 494 * Thou shalt not allow threads to hit this fail point after you enter this 495 * function, nor shall you call this multiple times for a given fp. |
195 * @ingroup failpoint 196 */ 197void 198fail_point_destroy(struct fail_point *fp) 199{ 200 | 496 * @ingroup failpoint 497 */ 498void 499fail_point_destroy(struct fail_point *fp) 500{ 501 |
502 fail_point_drain(fp, 0); 503 |
|
201 if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) { 202 fp_free(__DECONST(void *, fp->fp_name)); 203 fp->fp_name = NULL; 204 } 205 fp->fp_flags = 0; | 504 if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) { 505 fp_free(__DECONST(void *, fp->fp_name)); 506 fp->fp_name = NULL; 507 } 508 fp->fp_flags = 0; |
206 clear_entries(&fp->fp_entries); | 509 510 sx_xlock(&sx_fp_set); 511 fail_point_garbage_collect(); 512 sx_xunlock(&sx_fp_set); |
207} 208 209/** 210 * This does the real work of evaluating a fail point. If the fail point tells 211 * us to return a value, this function returns 1 and fills in 'return_value' 212 * (return_value is allowed to be null). If the fail point tells us to panic, 213 * we never return. Otherwise we just return 0 after doing some work, which 214 * means "keep going". 215 */ 216enum fail_point_return_code 217fail_point_eval_nontrivial(struct fail_point *fp, int *return_value) 218{ | 513} 514 515/** 516 * This does the real work of evaluating a fail point. If the fail point tells 517 * us to return a value, this function returns 1 and fills in 'return_value' 518 * (return_value is allowed to be null). If the fail point tells us to panic, 519 * we never return. Otherwise we just return 0 after doing some work, which 520 * means "keep going". 521 */ 522enum fail_point_return_code 523fail_point_eval_nontrivial(struct fail_point *fp, int *return_value) 524{ |
219 enum fail_point_return_code ret = FAIL_POINT_RC_CONTINUE; 220 struct fail_point_entry *ent, *next; | 525 bool execute = false; 526 struct fail_point_entry *ent; 527 struct fail_point_setting *fp_setting; 528 enum fail_point_return_code ret; 529 int cont; 530 int count; |
221 int msecs; | 531 int msecs; |
532 int usecs; |
|
222 | 533 |
223 FP_LOCK(); | 534 ret = FAIL_POINT_RC_CONTINUE; 535 cont = 0; /* don't continue by default */ |
224 | 536 |
225 TAILQ_FOREACH_SAFE(ent, &fp->fp_entries, fe_entries, next) { 226 int cont = 0; /* don't continue by default */ | 537 fp_setting = fail_point_setting_get_ref(fp); 538 if (fp_setting == NULL) 539 goto abort; |
227 | 540 |
541 TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) { 542 543 if (ent->fe_stale) 544 continue; 545 |
|
228 if (ent->fe_prob < PROB_MAX && 229 ent->fe_prob < random() % PROB_MAX) 230 continue; | 546 if (ent->fe_prob < PROB_MAX && 547 ent->fe_prob < random() % PROB_MAX) 548 continue; |
549 |
|
231 if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid) 232 continue; 233 | 550 if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid) 551 continue; 552 |
553 if (ent->fe_count != FE_COUNT_UNTRACKED) { 554 count = ent->fe_count; 555 while (count > 0) { 556 if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) { 557 count--; 558 execute = true; 559 break; 560 } 561 count = ent->fe_count; 562 } 563 if (execute == false) 564 /* We lost the race; consider the entry stale and bail now */ 565 continue; 566 if (count == 0) 567 ent->fe_stale = true; 568 } 569 |
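    /*
     * A note on the lockless countdown above: each tracked entry is
     * decremented with atomic_cmpset_32() rather than under a lock.
     * With fe_count == 1 and two racing threads, exactly one CAS of
     * 1 -> 0 succeeds; that winner executes the entry and marks it
     * stale, while the loser re-reads the count, sees 0, exits the
     * loop with "execute" still false, and skips the entry.
     */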
|
234 switch (ent->fe_type) { 235 case FAIL_POINT_PANIC: 236 panic("fail point %s panicking", fp->fp_name); 237 /* NOTREACHED */ 238 239 case FAIL_POINT_RETURN: 240 if (return_value != NULL) 241 *return_value = ent->fe_arg; 242 ret = FAIL_POINT_RC_RETURN; 243 break; 244 245 case FAIL_POINT_BREAK: 246 printf("fail point %s breaking to debugger\n", | 570 switch (ent->fe_type) { 571 case FAIL_POINT_PANIC: 572 panic("fail point %s panicking", fp->fp_name); 573 /* NOTREACHED */ 574 575 case FAIL_POINT_RETURN: 576 if (return_value != NULL) 577 *return_value = ent->fe_arg; 578 ret = FAIL_POINT_RC_RETURN; 579 break; 580 581 case FAIL_POINT_BREAK: 582 printf("fail point %s breaking to debugger\n", |
247 fp->fp_name); | 583 fp->fp_name); |
248 breakpoint(); 249 break; 250 251 case FAIL_POINT_PRINT: 252 printf("fail point %s executing\n", fp->fp_name); 253 cont = ent->fe_arg; 254 break; 255 256 case FAIL_POINT_SLEEP: | 584 breakpoint(); 585 break; 586 587 case FAIL_POINT_PRINT: 588 printf("fail point %s executing\n", fp->fp_name); 589 cont = ent->fe_arg; 590 break; 591 592 case FAIL_POINT_SLEEP: |
257 /* 258 * Free the entry now if necessary, since 259 * we're about to drop the mutex and sleep. 260 */ | |
261 msecs = ent->fe_arg; | 593 msecs = ent->fe_arg; |
262 if (ent->fe_count > 0 && --ent->fe_count == 0) { 263 free_entry(&fp->fp_entries, ent); 264 ent = NULL; 265 } 266 | |
267 if (msecs) | 594 if (msecs) |
268 fail_point_sleep(fp, ent, msecs, &ret); | 595 fail_point_sleep(fp, msecs, &ret); |
269 break; 270 | 596 break; 597 |
598 case FAIL_POINT_PAUSE: 599 /** 600 * Pausing is inherently strange with multiple 601 * entries given our design. That is because some 602 * entries could be unreachable, for instance in cases like: 603 * pause->return. We can never reach the return entry. 604 * The sysctl layer actually truncates all entries after 605 * a pause for this reason. 606 */ 607 mtx_lock_spin(&fp_setting->feq_mtx); 608 fail_point_pause(fp, &ret, &fp_setting->feq_mtx); 609 mtx_unlock_spin(&fp_setting->feq_mtx); 610 break; 611 612 case FAIL_POINT_YIELD: 613 kern_yield(-1); 614 break; 615 616 case FAIL_POINT_DELAY: 617 usecs = ent->fe_arg; 618 DELAY(usecs); 619 break; 620 |
|
271 default: 272 break; 273 } 274 | 621 default: 622 break; 623 } 624 |
275 if (ent != NULL && ent->fe_count > 0 && --ent->fe_count == 0) 276 free_entry(&fp->fp_entries, ent); | |
277 if (cont == 0) 278 break; 279 } 280 | 625 if (cont == 0) 626 break; 627 } 628 |
281 /* Get rid of "off"s at the end. */ 282 while ((ent = TAILQ_LAST(&fp->fp_entries, fail_point_entries)) && 283 ent->fe_type == FAIL_POINT_OFF) 284 free_entry(&fp->fp_entries, ent); | 629 if (fail_point_is_off(fp)) 630 fail_point_eval_swap_out(fp, fp_setting); |
285 | 631 |
286 FP_UNLOCK(); | 632abort: 633 fail_point_setting_release_ref(fp); |
287 288 return (ret); | 634 635 return (ret); |
636 |
|
289} 290 291/** 292 * Translate internal fail_point structure into human-readable text. 293 */ 294static void | 637} 638 639/** 640 * Translate internal fail_point structure into human-readable text. 641 */ 642static void |
295fail_point_get(struct fail_point *fp, struct sbuf *sb) | 643fail_point_get(struct fail_point *fp, struct sbuf *sb, 644 bool verbose) |
296{ 297 struct fail_point_entry *ent; | 645{ 646 struct fail_point_entry *ent; |
647 struct fail_point_setting *fp_setting; 648 struct fail_point_entry *fp_entry_cpy; 649 int cnt_sleeping; 650 int idx; 651 int printed_entry_count; |
|
298 | 652 |
299 FP_LOCK(); | 653 cnt_sleeping = 0; 654 idx = 0; 655 printed_entry_count = 0; |
300 | 656 |
301 TAILQ_FOREACH(ent, &fp->fp_entries, fe_entries) { | 657 fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) * 658 (FP_MAX_ENTRY_COUNT + 1), M_WAITOK); 659 660 fp_setting = fail_point_setting_get_ref(fp); 661 662 if (fp_setting != NULL) { 663 TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) { 664 if (ent->fe_stale) 665 continue; 666 667 KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT, 668 ("FP entry list larger than allowed")); 669 670 fp_entry_cpy[printed_entry_count] = *ent; 671 ++printed_entry_count; 672 } 673 } 674 fail_point_setting_release_ref(fp); 675 676 /* This is our equivalent of a NULL terminator */ 677 fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID; 678 679 while (idx < printed_entry_count) { 680 ent = &fp_entry_cpy[idx]; 681 ++idx; |
302 if (ent->fe_prob < PROB_MAX) { 303 int decimal = ent->fe_prob % (PROB_MAX / 100); 304 int units = ent->fe_prob / (PROB_MAX / 100); 305 sbuf_printf(sb, "%d", units); 306 if (decimal) { 307 int digits = PROB_DIGITS - 2; 308 while (!(decimal % 10)) { 309 digits--; 310 decimal /= 10; 311 } 312 sbuf_printf(sb, ".%0*d", digits, decimal); 313 } 314 sbuf_printf(sb, "%%"); 315 } | 682 if (ent->fe_prob < PROB_MAX) { 683 int decimal = ent->fe_prob % (PROB_MAX / 100); 684 int units = ent->fe_prob / (PROB_MAX / 100); 685 sbuf_printf(sb, "%d", units); 686 if (decimal) { 687 int digits = PROB_DIGITS - 2; 688 while (!(decimal % 10)) { 689 digits--; 690 decimal /= 10; 691 } 692 sbuf_printf(sb, ".%0*d", digits, decimal); 693 } 694 sbuf_printf(sb, "%%"); 695 } |
316 if (ent->fe_count > 0) | 696 if (ent->fe_count >= 0) |
317 sbuf_printf(sb, "%d*", ent->fe_count); 318 sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name); 319 if (ent->fe_arg) 320 sbuf_printf(sb, "(%d)", ent->fe_arg); 321 if (ent->fe_pid != NO_PID) 322 sbuf_printf(sb, "[pid %d]", ent->fe_pid); 323 if (TAILQ_NEXT(ent, fe_entries)) 324 sbuf_printf(sb, "->"); 325 } | 697 sbuf_printf(sb, "%d*", ent->fe_count); 698 sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name); 699 if (ent->fe_arg) 700 sbuf_printf(sb, "(%d)", ent->fe_arg); 701 if (ent->fe_pid != NO_PID) 702 sbuf_printf(sb, "[pid %d]", ent->fe_pid); 703 if (TAILQ_NEXT(ent, fe_entries)) 704 sbuf_printf(sb, "->"); 705 } |
326 if (TAILQ_EMPTY(&fp->fp_entries)) | 706 if (!printed_entry_count) |
327 sbuf_printf(sb, "off"); 328 | 707 sbuf_printf(sb, "off"); 708 |
329 FP_UNLOCK(); | 709 fp_free(fp_entry_cpy); 710 if (verbose) { 711 /* Print number of sleeping threads. queue=0 is the argument 712 * used by msleep when sending our threads to sleep. */ 713 sbuf_printf(sb, "\nsleeping_thread_stacks = {\n"); 714 sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0, 715 &cnt_sleeping); 716 717 sbuf_printf(sb, "},\n"); 718 sbuf_printf(sb, "sleeping_thread_count = %d,\n", 719 cnt_sleeping); 720 721 sbuf_printf(sb, "paused_thread_stacks = {\n"); 722 sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0, 723 &cnt_sleeping); 724 725 sbuf_printf(sb, "},\n"); 726 sbuf_printf(sb, "paused_thread_count = %d\n", 727 cnt_sleeping); 728 } |
330} 331 332/** 333 * Set an internal fail_point structure from a human-readable failpoint string 334 * in a lock-safe manner. 335 */ 336static int 337fail_point_set(struct fail_point *fp, char *buf) 338{ | 729} 730 731/** 732 * Set an internal fail_point structure from a human-readable failpoint string 733 * in a lock-safe manner. 734 */ 735static int 736fail_point_set(struct fail_point *fp, char *buf) 737{ |
339 int error = 0; | |
340 struct fail_point_entry *ent, *ent_next; | 738 struct fail_point_entry *ent, *ent_next; |
341 struct fail_point_entries new_entries; | 739 struct fail_point_setting *entries; 740 bool should_wake_paused; 741 bool should_truncate; 742 int error; |
342 | 743 |
744 error = 0; 745 should_wake_paused = false; 746 should_truncate = false; 747 |
|
343 /* Parse new entries. */ | 748 /* Parse new entries. */ |
344 TAILQ_INIT(&new_entries); 345 if (!parse_fail_point(&new_entries, buf)) { 346 clear_entries(&new_entries); | 749 /** 750 * ref protects our new malloc'd stuff from being garbage collected 751 * before we link it. 752 */ 753 fail_point_setting_get_ref(fp); 754 entries = fail_point_setting_new(fp); 755 if (parse_fail_point(entries, buf) == NULL) { 756 STAILQ_REMOVE(&fp_setting_garbage, entries, 757 fail_point_setting, fs_garbage_link); 758 fail_point_setting_destroy(entries); |
347 error = EINVAL; 348 goto end; 349 } 350 | 759 error = EINVAL; 760 goto end; 761 } 762 |
351 FP_LOCK(); | 763 /** 764 * Transfer the entries we are going to keep to a new list. 765 * Get rid of useless zero probability entries, and entries with hit 766 * count 0. 767 * If 'off' is present, and it has no hit count set, then all entries 768 * after it are discarded since they are unreachable. 769 */ 770 TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) { 771 if (ent->fe_prob == 0 || ent->fe_count == 0) { 772 printf("Discarding entry which cannot execute %s\n", 773 fail_type_strings[ent->fe_type].name); 774 TAILQ_REMOVE(&entries->fp_entry_queue, ent, 775 fe_entries); 776 fp_free(ent); 777 continue; 778 } else if (should_truncate) { 779 printf("Discarding unreachable entry %s\n", 780 fail_type_strings[ent->fe_type].name); 781 TAILQ_REMOVE(&entries->fp_entry_queue, ent, 782 fe_entries); 783 fp_free(ent); 784 continue; 785 } |
352 | 786 |
353 /* Move new entries in. */ 354 TAILQ_SWAP(&fp->fp_entries, &new_entries, fail_point_entry, fe_entries); 355 clear_entries(&new_entries); | 787 if (ent->fe_type == FAIL_POINT_OFF) { 788 should_wake_paused = true; 789 if (ent->fe_count == FE_COUNT_UNTRACKED) { 790 should_truncate = true; 791 TAILQ_REMOVE(&entries->fp_entry_queue, ent, 792 fe_entries); 793 fp_free(ent); 794 } 795 } else if (ent->fe_type == FAIL_POINT_PAUSE) { 796 should_truncate = true; 797 } else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags & 798 FAIL_POINT_NONSLEEPABLE)) { 799 /** 800 * If this fail point is annotated as being in a 801 * non-sleepable ctx, convert sleep to delay and 802 * convert the msec argument to usecs. 803 */ 804 printf("Sleep call request on fail point in " 805 "non-sleepable context; using delay instead " 806 "of sleep\n"); 807 ent->fe_type = FAIL_POINT_DELAY; 808 ent->fe_arg *= 1000; 809 } 810 } |
356 | 811 |
357 /* Get rid of useless zero probability entries. */ 358 TAILQ_FOREACH_SAFE(ent, &fp->fp_entries, fe_entries, ent_next) { 359 if (ent->fe_prob == 0) 360 free_entry(&fp->fp_entries, ent); | 812 if (TAILQ_EMPTY(&entries->fp_entry_queue)) { 813 entries = fail_point_swap_settings(fp, NULL); 814 if (entries != NULL) 815 wakeup(FP_PAUSE_CHANNEL(fp)); 816 } else { 817 if (should_wake_paused) 818 wakeup(FP_PAUSE_CHANNEL(fp)); 819 fail_point_swap_settings(fp, entries); |
361 } 362 | 820 } 821 |
363 /* Get rid of "off"s at the end. */ 364 while ((ent = TAILQ_LAST(&fp->fp_entries, fail_point_entries)) && 365 ent->fe_type == FAIL_POINT_OFF) 366 free_entry(&fp->fp_entries, ent); 367 368 FP_UNLOCK(); 369 370 end: | 822end: |
371#ifdef IWARNING 372 if (error) 373 IWARNING("Failed to set %s %s to %s", 374 fp->fp_name, fp->fp_location, buf); 375 else 376 INOTICE("Set %s %s to %s", 377 fp->fp_name, fp->fp_location, buf); 378#endif /* IWARNING */ 379 | 823#ifdef IWARNING 824 if (error) 825 IWARNING("Failed to set %s %s to %s", 826 fp->fp_name, fp->fp_location, buf); 827 else 828 INOTICE("Set %s %s to %s", 829 fp->fp_name, fp->fp_location, buf); 830#endif /* IWARNING */ 831 |
832 fail_point_setting_release_ref(fp); |
|
380 return (error); 381} 382 383#define MAX_FAIL_POINT_BUF 1023 384 385/** 386 * Handle kernel failpoint set/get. 387 */ | 833 return (error); 834} 835 836#define MAX_FAIL_POINT_BUF 1023 837 838/** 839 * Handle kernel failpoint set/get. 840 */ |
841 |
|
388int 389fail_point_sysctl(SYSCTL_HANDLER_ARGS) 390{ | 842int 843fail_point_sysctl(SYSCTL_HANDLER_ARGS) 844{ |
391 struct fail_point *fp = arg1; 392 char *buf = NULL; | 845 struct fail_point *fp; 846 char *buf; 847 struct sbuf *sb_check; |
393 struct sbuf sb; 394 int error; 395 | 848 struct sbuf sb; 849 int error; 850 |
396 /* Retrieving */ 397 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND | SBUF_INCLUDENUL); 398 fail_point_get(fp, &sb); 399 sbuf_trim(&sb); 400 error = sbuf_finish(&sb); 401 if (error == 0) 402 error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); 403 sbuf_delete(&sb); | 851 error = 0; 852 fp = arg1; 853 buf = NULL; |
404 | 854 |
855 sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND); 856 if (sb_check != &sb) 857 return (ENOMEM); 858 859 sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req); 860 |
|
405 /* Setting */ | 861 /* Setting */ |
406 if (!error && req->newptr) { | 862 /** 863 * Lock protects any new entries from being garbage collected before we 864 * can link them to the fail point. 865 */ 866 sx_xlock(&sx_fp_set); 867 if (req->newptr) { |
407 if (req->newlen > MAX_FAIL_POINT_BUF) { 408 error = EINVAL; 409 goto out; 410 } 411 412 buf = fp_malloc(req->newlen + 1, M_WAITOK); 413 414 error = SYSCTL_IN(req, buf, req->newlen); 415 if (error) 416 goto out; 417 buf[req->newlen] = '\0'; 418 419 error = fail_point_set(fp, buf); | 868 if (req->newlen > MAX_FAIL_POINT_BUF) { 869 error = EINVAL; 870 goto out; 871 } 872 873 buf = fp_malloc(req->newlen + 1, M_WAITOK); 874 875 error = SYSCTL_IN(req, buf, req->newlen); 876 if (error) 877 goto out; 878 buf[req->newlen] = '\0'; 879 880 error = fail_point_set(fp, buf); |
420 } | 881 } |
421 | 882 |
883 fail_point_garbage_collect(); 884 sx_xunlock(&sx_fp_set); 885 886 /* Retrieving. */ 887 fail_point_get(fp, &sb, false); 888 |
|
422out: | 889out: |
423 fp_free(buf); | 890 sbuf_finish(&sb); 891 sbuf_delete(&sb); 892 893 if (buf) 894 fp_free(buf); 895 |
424 return (error); 425} 426 | 896 return (error); 897} 898 |
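From userland, this handler is reached through sysctl(3). A hedged sketch, assuming a fail point exposed as debug.fail_point.my_fp (the leaf name is hypothetical); a single call may both install a new setting (newp) and read back the resulting human-readable state (oldp), matching the handler's set-then-retrieve order:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
            const char *cfg = "1%return(35)->sleep(100)";
            char cur[1024];
            size_t len = sizeof(cur);

            /* Settings longer than MAX_FAIL_POINT_BUF are rejected
             * with EINVAL by the handler above. */
            if (sysctlbyname("debug.fail_point.my_fp", cur, &len,
                cfg, strlen(cfg) + 1) == -1) {
                    perror("sysctlbyname");
                    return (1);
            }
            printf("fail point now: %.*s\n", (int)len, cur);
            return (0);
    }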
899int 900fail_point_sysctl_status(SYSCTL_HANDLER_ARGS) 901{ 902 struct fail_point *fp; 903 struct sbuf sb, *sb_check; 904 905 fp = arg1; 906 907 sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND); 908 if (sb_check != &sb) 909 return (ENOMEM); 910 911 sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req); 912 913 /* Retrieving. */ 914 fail_point_get(fp, &sb, true); 915 916 sbuf_finish(&sb); 917 sbuf_delete(&sb); 918 919 /** 920 * Lock protects any new entries from being garbage collected before we 921 * can link them to the fail point. 922 */ 923 sx_xlock(&sx_fp_set); 924 fail_point_garbage_collect(); 925 sx_xunlock(&sx_fp_set); 926 927 return (0); 928} 929 930int 931fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len) 932{ 933 struct sysctl_req *sa; 934 int error; 935 936 sa = sysctl_args; 937 938 error = SYSCTL_OUT(sa, buf, len); 939 940 if (error == ENOMEM) 941 return (-1); 942 else 943 return (len); 944} 945 946 |
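fail_sysctl_drain_func() follows the sbuf_set_drain(9) contract: a negative return aborts the drain (sbuf_finish() then reports the error), while a positive return reports how many bytes were consumed. A hedged sketch of an alternative drain with the same contract (log_drain_func is hypothetical):

    /* Drain an sbuf to the console instead of a sysctl request. */
    static int
    log_drain_func(void *arg __unused, const char *buf, int len)
    {

            printf("%.*s", len, buf);
            return (len);           /* consumed the whole chunk */
    }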
|
427/** 428 * Internal helper function to translate a human-readable failpoint string 429 * into an internally-parsable fail_point structure. 430 */ 431static char * | 947/** 948 * Internal helper function to translate a human-readable failpoint string 949 * into an internally-parsable fail_point structure. 950 */ 951static char * |
432parse_fail_point(struct fail_point_entries *ents, char *p) | 952parse_fail_point(struct fail_point_setting *ents, char *p) |
433{ 434 /* <fail_point> :: 435 * <term> ( "->" <term> )* 436 */ | 953{ 954 /* <fail_point> :: 955 * <term> ( "->" <term> )* 956 */ |
957 uint8_t term_count; 958 959 term_count = 1; 960 |
|
437 p = parse_term(ents, p); 438 if (p == NULL) 439 return (NULL); | 961 p = parse_term(ents, p); 962 if (p == NULL) 963 return (NULL); |
964 |
|
440 while (*p != '\0') { | 965 while (*p != '\0') { |
441 if (p[0] != '-' || p[1] != '>') | 966 term_count++; 967 if (p[0] != '-' || p[1] != '>' || 968 (p = parse_term(ents, p+2)) == NULL || 969 term_count > FP_MAX_ENTRY_COUNT) |
442 return (NULL); | 970 return (NULL); |
443 p = parse_term(ents, p + 2); 444 if (p == NULL) 445 return (NULL); | |
446 } 447 return (p); 448} 449 450/** 451 * Internal helper function to parse an individual term from a failpoint. 452 */ 453static char * | 971 } 972 return (p); 973} 974 975/** 976 * Internal helper function to parse an individual term from a failpoint. 977 */ 978static char * |
454parse_term(struct fail_point_entries *ents, char *p) | 979parse_term(struct fail_point_setting *ents, char *p) |
455{ 456 struct fail_point_entry *ent; 457 | 980{ 981 struct fail_point_entry *ent; 982 |
458 ent = fp_malloc(sizeof *ent, M_WAITOK | M_ZERO); 459 ent->fe_prob = PROB_MAX; 460 ent->fe_pid = NO_PID; 461 TAILQ_INSERT_TAIL(ents, ent, fe_entries); | 983 ent = fail_point_entry_new(ents); |
462 463 /* 464 * <term> :: 465 * ( (<float> "%") | (<integer> "*" ) )* 466 * <type> 467 * [ "(" <integer> ")" ] 468 * [ "[pid " <integer> "]" ] 469 */ --- 8 unchanged lines hidden (view full) --- 478 479 if (*p == '%') { 480 if (units > 100) /* prevent overflow early */ 481 units = 100; 482 ent->fe_prob = units * (PROB_MAX / 100) + decimal; 483 if (ent->fe_prob > PROB_MAX) 484 ent->fe_prob = PROB_MAX; 485 } else if (*p == '*') { | 984 985 /* 986 * <term> :: 987 * ( (<float> "%") | (<integer> "*" ) )* 988 * <type> 989 * [ "(" <integer> ")" ] 990 * [ "[pid " <integer> "]" ] 991 */ --- 8 unchanged lines hidden (view full) --- 1000 1001 if (*p == '%') { 1002 if (units > 100) /* prevent overflow early */ 1003 units = 100; 1004 ent->fe_prob = units * (PROB_MAX / 100) + decimal; 1005 if (ent->fe_prob > PROB_MAX) 1006 ent->fe_prob = PROB_MAX; 1007 } else if (*p == '*') { |
486 if (!units || decimal) | 1008 if (!units || units < 0 || decimal) |
487 return (NULL); 488 ent->fe_count = units; 489 } else 490 return (NULL); 491 p++; 492 } 493 494 /* <type> */ 495 p = parse_type(ent, p); 496 if (p == NULL) 497 return (NULL); 498 if (*p == '\0') 499 return (p); 500 501 /* [ "(" <integer> ")" ] */ 502 if (*p != '(') | 1009 return (NULL); 1010 ent->fe_count = units; 1011 } else 1012 return (NULL); 1013 p++; 1014 } 1015 1016 /* <type> */ 1017 p = parse_type(ent, p); 1018 if (p == NULL) 1019 return (NULL); 1020 if (*p == '\0') 1021 return (p); 1022 1023 /* [ "(" <integer> ")" ] */ 1024 if (*p != '(') |
503 return p; | 1025 return (p); |
504 p++; 505 if (!isdigit(*p) && *p != '-') 506 return (NULL); 507 ent->fe_arg = strtol(p, &p, 0); 508 if (*p++ != ')') 509 return (NULL); 510 511 /* [ "[pid " <integer> "]" ] */ | 1026 p++; 1027 if (!isdigit(*p) && *p != '-') 1028 return (NULL); 1029 ent->fe_arg = strtol(p, &p, 0); 1030 if (*p++ != ')') 1031 return (NULL); 1032 1033 /* [ "[pid " <integer> "]" ] */ |
512#define PID_STRING "[pid " | 1034#define PID_STRING "[pid " |
513 if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0) 514 return (p); 515 p += sizeof(PID_STRING) - 1; 516 if (!isdigit(*p)) 517 return (NULL); 518 ent->fe_pid = strtol(p, &p, 0); 519 if (*p++ != ']') 520 return (NULL); --- 4 unchanged lines hidden (view full) --- 525/** 526 * Internal helper function to parse a numeric for a failpoint term. 527 */ 528static char * 529parse_number(int *out_units, int *out_decimal, char *p) 530{ 531 char *old_p; 532 | 1035 if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0) 1036 return (p); 1037 p += sizeof(PID_STRING) - 1; 1038 if (!isdigit(*p)) 1039 return (NULL); 1040 ent->fe_pid = strtol(p, &p, 0); 1041 if (*p++ != ']') 1042 return (NULL); --- 4 unchanged lines hidden (view full) --- 1047/** 1048 * Internal helper function to parse a numeric for a failpoint term. 1049 */ 1050static char * 1051parse_number(int *out_units, int *out_decimal, char *p) 1052{ 1053 char *old_p; 1054 |
533 /* | 1055 /** |
534 * <number> :: 535 * <integer> [ "." <integer> ] | 536 * "." <integer> 537 */ 538 539 /* whole part */ 540 old_p = p; 541 *out_units = strtol(p, &p, 10); --- 37 unchanged lines hidden (view full) --- 579 if (strncmp(fail_type_strings[type].name, beg, len) == 0) { 580 ent->fe_type = type; 581 return (beg + len); 582 } 583 } 584 return (NULL); 585} 586 | 1056 * <number> :: 1057 * <integer> [ "." <integer> ] | 1058 * "." <integer> 1059 */ 1060 1061 /* whole part */ 1062 old_p = p; 1063 *out_units = strtol(p, &p, 10); --- 37 unchanged lines hidden (view full) --- 1101 if (strncmp(fail_type_strings[type].name, beg, len) == 0) { 1102 ent->fe_type = type; 1103 return (beg + len); 1104 } 1105 } 1106 return (NULL); 1107} 1108 |
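Putting the helpers together, a hedged trace of one term through the parser (field names from struct fail_point_entry above):

    /*
     * parse_term("1.5%2*sleep(250)[pid 8]") yields:
     *   "1.5%"    -> fe_prob  = 15000  (1.5% of PROB_MAX)
     *   "2*"      -> fe_count = 2      (two hits, then stale)
     *   "sleep"   -> fe_type  = FAIL_POINT_SLEEP  (via parse_type)
     *   "(250)"   -> fe_arg   = 250    (msecs to sleep)
     *   "[pid 8]" -> fe_pid   = 8      (only this process fails)
     */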
587/** 588 * Internal helper function to free an individual failpoint term. 589 */ 590static void 591free_entry(struct fail_point_entries *ents, struct fail_point_entry *ent) 592{ 593 TAILQ_REMOVE(ents, ent, fe_entries); 594 fp_free(ent); 595} | 1109/* The fail point sysctl tree. */ 1110SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points"); |
596 | 1111 |
597/** 598 * Internal helper function to clear out all failpoint terms for a single 599 * failpoint. 600 */ 601static void 602clear_entries(struct fail_point_entries *ents) | 1112/* Debugging/testing stuff for fail point */ 1113static int 1114sysctl_test_fail_point(SYSCTL_HANDLER_ARGS) |
603{ | 1115{ |
604 struct fail_point_entry *ent, *ent_next; | |
605 | 1116 |
606 TAILQ_FOREACH_SAFE(ent, ents, fe_entries, ent_next) 607 fp_free(ent); 608 TAILQ_INIT(ents); | 1117 KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point); 1118 return (0); |
609} | 1119} |
610 611/* The fail point sysctl tree. */ 612SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points"); | 1120SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point, 1121 CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_test_fail_point, "A", 1122 "Trigger test fail points"); |
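The test fail point can be driven from userland; a hedged example (the OID names debug.fail_point.test_fail_point and debug.fail_point.test_trigger_fail_point are inferred from the KFAIL_POINT_RETURN() and SYSCTL_OID() uses above):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
            const char *cfg = "return(5)";
            char buf[128];
            size_t len = sizeof(buf);

            /* Arm the fail point... */
            if (sysctlbyname("debug.fail_point.test_fail_point",
                NULL, NULL, cfg, strlen(cfg) + 1) == -1)
                    perror("set");
            /* ...then trigger it: reading the read-only OID runs
             * sysctl_test_fail_point() and thus the fail point. */
            if (sysctlbyname("debug.fail_point.test_trigger_fail_point",
                buf, &len, NULL, 0) == -1)
                    perror("trigger");
            return (0);
    }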