/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(int state)
{
        return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
        while (1)
                cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
        panic_smp_self_stop();
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
        int old_cpu, cpu;

        cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

        if (old_cpu == PANIC_CPU_INVALID)
                panic("%s", msg);
        else if (old_cpu != cpu)
                nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
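
/*
 * Hedged usage sketch, not part of this file: a hard-lockup detector running
 * in NMI context might report a lockup roughly like this (hardlockup_panic
 * and the surrounding handler are assumed names):
 *
 *      if (hardlockup_panic)
 *              nmi_panic(regs, "Watchdog detected hard LOCKUP");
 *
 * The first CPU to win the panic_cpu cmpxchg panics; a CPU that has already
 * panicked simply returns, and any other CPU parks in nmi_panic_self_stop().
 */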

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
        static char buf[1024];
        va_list args;
        long i, i_next = 0;
        int state = 0;
        int old_cpu, this_cpu;

        /*
         * Disable local interrupts. This will prevent panic_smp_self_stop
         * from deadlocking the first cpu that invokes the panic, since
         * there is nothing to prevent an interrupt handler (that runs
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();

        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         *
         * Only one CPU is allowed to execute the panic code from here. For
         * multiple parallel invocations of panic, all other CPUs either
         * stop themselves or will wait until they are stopped by the 1st CPU
         * with smp_send_stop().
         *
         * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
         * comes here, so go ahead.
         * `old_cpu == this_cpu' means we came from nmi_panic() which sets
         * panic_cpu to this CPU. In this case, this is also the 1st CPU.
         */
        this_cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

        if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
                panic_smp_self_stop();

        console_verbose();
        bust_spinlocks(1);
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        /*
         * Avoid nested stack-dumping if a panic occurs during oops processing
         */
        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
                dump_stack();
#endif

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * If we want to run this after calling panic_notifiers, pass
         * the "crash_kexec_post_notifiers" option to the kernel.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (!crash_kexec_post_notifiers) {
                printk_nmi_flush_on_panic();
                __crash_kexec(NULL);
        }

        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();

        /*
         * Run any panic handlers, including those that might need to
         * add information to the kmsg dump output.
         */
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        /* Call flush even twice. It tries harder with a single online CPU */
        printk_nmi_flush_on_panic();
        kmsg_dump(KMSG_DUMP_PANIC);

        /*
         * If you doubt that kdump always works fine in any situation,
         * "crash_kexec_post_notifiers" offers you a chance to run the
         * panic_notifiers and dump kmsg before kdump.
         * Note: since some panic_notifiers can make the crashed kernel
         * more unstable, this can also increase the risk of kdump failing.
         *
         * Bypass the panic_cpu check and call __crash_kexec directly.
         */
        if (crash_kexec_post_notifiers)
                __crash_kexec(NULL);

        bust_spinlocks(0);

        /*
         * We may have ended up stopping the CPU holding the lock (in
         * smp_send_stop()) while still having some valuable data in the console
         * buffer. Try to acquire the lock then release it regardless of the
         * result. The release will also print the buffers out. Locks debug
         * should be disabled to avoid reporting bad unlock balance when
         * panic() is not being called from OOPS.
         */
        debug_locks_off();
        console_flush_on_panic();

        if (!panic_blink)
                panic_blink = no_blink;

        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
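                /*
                 * Rough timing note (an assumption spelled out here, not taken
                 * from the original comments): the loop below counts the
                 * timeout down in PANIC_TIMER_STEP (100 ms) chunks and toggles
                 * the blink state whenever i catches up with i_next, i.e.
                 * about every 3600 / PANIC_BLINK_SPD = 200 ms, crediting
                 * whatever delay panic_blink() reports against the timeout.
                 */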
                pr_emerg("Rebooting in %d seconds..", panic_timeout);

                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
                        if (i >= i_next) {
                                i += panic_blink(state ^= 1);
                                i_next = i + 3600 / PANIC_BLINK_SPD;
                        }
                        mdelay(PANIC_TIMER_STEP);
                }
        }
        if (panic_timeout != 0) {
                /*
                 * This will not be a clean reboot, with everything
                 * shutting down. But if there is a chance of
                 * rebooting the system it will be rebooted.
                 */
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;
                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        {
                unsigned long caller;

                caller = (unsigned long)__builtin_return_address(0);
                disabled_wait(caller);
        }
#endif
        pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf);
        local_irq_enable();
        for (i = 0; ; i += PANIC_TIMER_STEP) {
                touch_softlockup_watchdog();
                if (i >= i_next) {
                        i += panic_blink(state ^= 1);
                        i_next = i + 3600 / PANIC_BLINK_SPD;
                }
                mdelay(PANIC_TIMER_STEP);
        }
}

EXPORT_SYMBOL(panic);

struct tnt {
        u8      bit;
        char    true;
        char    false;
};

static const struct tnt tnts[] = {
        { TAINT_PROPRIETARY_MODULE,     'P', 'G' },
        { TAINT_FORCED_MODULE,          'F', ' ' },
        { TAINT_CPU_OUT_OF_SPEC,        'S', ' ' },
        { TAINT_FORCED_RMMOD,           'R', ' ' },
        { TAINT_MACHINE_CHECK,          'M', ' ' },
        { TAINT_BAD_PAGE,               'B', ' ' },
        { TAINT_USER,                   'U', ' ' },
        { TAINT_DIE,                    'D', ' ' },
        { TAINT_OVERRIDDEN_ACPI_TABLE,  'A', ' ' },
        { TAINT_WARN,                   'W', ' ' },
        { TAINT_CRAP,                   'C', ' ' },
        { TAINT_FIRMWARE_WORKAROUND,    'I', ' ' },
        { TAINT_OOT_MODULE,             'O', ' ' },
        { TAINT_UNSIGNED_MODULE,        'E', ' ' },
        { TAINT_SOFTLOCKUP,             'L', ' ' },
        { TAINT_LIVEPATCH,              'K', ' ' },
};
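
/*
 * Illustrative example (assumed typical output, not taken from this file):
 * a kernel whose only taint is TAINT_WARN prints one character per tnts[]
 * entry above, giving roughly "Tainted: G        W" -- 'G' in the
 * proprietary-module slot, blanks for unset flags, 'W' for the warning taint.
 */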

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - modules from drivers/staging are loaded.
 *  'I' - Working around severe firmware bug.
 *  'O' - Out-of-tree module has been loaded.
 *  'E' - Unsigned module has been loaded.
 *  'L' - A soft lockup has previously occurred.
 *  'K' - Kernel has been live patched.
 *
 * The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
        static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < ARRAY_SIZE(tnts); i++) {
                        const struct tnt *t = &tnts[i];
                        *s++ = test_bit(t->bit, &tainted_mask) ?
                                        t->true : t->false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");

        return buf;
}

int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok =
 * LOCKDEP_NOW_UNRELIABLE, but for some noteworthy-but-not-corrupting cases,
 * LOCKDEP_STILL_OK can be used.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
        if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
                pr_warn("Disabling lock debugging due to kernel taint\n");

        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);

static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy...
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        tracing_off();
        /* can't trust the integrity of the kernel anymore: */
        debug_locks_off();
        do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));
        else
                oops_id++;

        return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
        init_oops_id();
        pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
        kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
        const char *fmt;
        va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
            struct pt_regs *regs, struct warn_args *args)
{
        disable_trace_on_warning();

        pr_warn("------------[ cut here ]------------\n");

        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
                        caller);
        else
                pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
                        raw_smp_processor_id(), current->pid, caller);

        if (args)
                vprintk(args->fmt, args->args);

        if (panic_on_warn) {
                /*
                 * This thread may hit another WARN() in the panic path.
                 * Resetting this prevents additional WARN() from panicking the
                 * system on this thread. Other CPUs are held off by the
                 * panic_cpu check in panic().
                 */
                panic_on_warn = 0;
                panic("panic_on_warn set ...\n");
        }

        print_modules();

        if (regs)
                show_regs(regs);
        else
                dump_stack();

        print_oops_end_marker();

        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef WANT_WARN_ON_SLOWPATH
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
        struct warn_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
               &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
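
/*
 * Hedged note: these slowpath helpers are normally reached through the
 * WARN()/WARN_ON() macro family rather than called directly; a WARN() with a
 * format string typically ends up in warn_slowpath_fmt(), while a bare
 * WARN_ON(cond) typically lands in warn_slowpath_null() below. The exact
 * expansion lives in the asm-generic/arch bug headers, not here.
 */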

void warn_slowpath_fmt_taint(const char *file, int line,
                             unsigned taint, const char *fmt, ...)
{
        struct warn_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        __warn(file, line, __builtin_return_address(0), taint, NULL, &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
        __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value.
 */
__visible void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %p\n",
              __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);

static int __init setup_crash_kexec_post_notifiers(char *s)
{
        crash_kexec_post_notifiers = true;
        return 0;
}
early_param("crash_kexec_post_notifiers", setup_crash_kexec_post_notifiers);

static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);
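
/*
 * Hedged summary of the command-line knobs defined above (the values are only
 * illustrative): "panic=30" reboots 30 seconds after a panic, "oops=panic"
 * turns every oops into a panic, "pause_on_oops=60" holds the first oops on
 * screen for a minute, "panic_on_warn=1" panics on the first WARN(), and
 * "crash_kexec_post_notifiers" runs the panic notifiers and the kmsg dump
 * before jumping into the crash kernel.
 */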