// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs' backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
unsigned long panic_print;
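
/*
 * panic_print is a bitmask built from the PANIC_PRINT_* flags above and is
 * consumed by panic_print_sys_info() below.  For example, panic_print=0x3 on
 * the kernel command line (see the core_param() at the bottom of this file)
 * selects both the task dump (0x1) and the memory info dump (0x2), while
 * 0x41 combines the task dump with backtraces from all CPUs.
 */
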
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

#if defined(CONFIG_SMP) && defined(CONFIG_SYSCTL)
static struct ctl_table kern_panic_table[] = {
	{
		.procname	= "oops_all_cpu_backtrace",
		.data		= &sysctl_oops_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif
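
/*
 * On CONFIG_SMP && CONFIG_SYSCTL kernels the knob above is exposed as
 * /proc/sys/kernel/oops_all_cpu_backtrace; writing 1 there, e.g.
 *	echo 1 > /proc/sys/kernel/oops_all_cpu_backtrace
 * makes oops_enter() below dump a backtrace of every online CPU along
 * with the oops itself.
 */
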
static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

static void panic_print_sys_info(bool console_flush)
{
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		try_block_console_kthreads(10000);
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * work in addition to stopping other CPUs.
		 */
		try_block_console_kthreads(10000);
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
};
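
/*
 * Each entry above gives the character printed when the taint flag is set,
 * the character printed when it is clear, and whether the flag is also
 * tracked per module. For example, a loaded proprietary module shows up as
 * 'P' in the first column of the taint string, while 'G' there means only
 * GPL-compatible modules were ever loaded.
 * See Documentation/admin-guide/tainted-kernels.rst.
 */
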
/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
					t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok =
 * LOCKDEP_NOW_UNRELIABLE, but for some noteworthy-but-not-corrupting cases
 * it can be LOCKDEP_STILL_OK.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	printk_prefer_direct_enter();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);

	printk_prefer_direct_exit();
}

#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);
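
/*
 * Booting with "oops=panic" thus sets panic_on_oops, making every oops
 * escalate to a full panic (the same effect as building with
 * CONFIG_PANIC_ON_OOPS=y).
 */
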
static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%sabled\n",
		panic_on_taint, panic_on_taint_nousertaint ? "en" : "dis");

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);
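
/*
 * Example: booting with "panic_on_taint=0x200" panics the kernel as soon as
 * TAINT_WARN (bit 9) is set, i.e. on the first WARN(); the bitmask is parsed
 * as hex. Appending ",nousertaint" is meant to keep taints written from user
 * space via the kernel.tainted sysctl from triggering that panic.
 */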