// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/context_tracking.h>
#include <linux/seq_buf.h>
#include <linux/sys_info.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
#define PANIC_MSG_BUFSZ 1024

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs' backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = IS_ENABLED(CONFIG_PANIC_ON_OOPS);
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
static unsigned int warn_limit __read_mostly;
static bool panic_console_replay;

bool panic_triggering_all_cpu_backtrace;
static bool panic_this_cpu_backtrace_printed;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

unsigned long panic_print;

static int panic_force_cpu = -1;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static void panic_print_deprecated(void)
{
	pr_info_once("Kernel: The 'panic_print' parameter is now deprecated. Please use 'panic_sys_info' and 'panic_console_replay' instead.\n");
}

#ifdef CONFIG_SYSCTL

/*
 * Taint values can only be increased.
 * This means we can safely use a temporary.
 */
static int proc_taint(const struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long tmptaint = get_taint();
	int err;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &tmptaint;
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	if (write) {
		int i;

		/*
		 * If we are relying on panic_on_taint not producing
		 * false positives due to userspace input, bail out
		 * before setting the requested taint flags.
		 */
		if (panic_on_taint_nousertaint && (tmptaint & panic_on_taint))
			return -EINVAL;

		/*
		 * Poor man's atomic or. Not worth adding a primitive
		 * to everyone's atomic.h for this.
		 */
		for (i = 0; i < TAINT_FLAGS_COUNT; i++)
			if ((1UL << i) & tmptaint)
				add_taint(i, LOCKDEP_STILL_OK);
	}

	return err;
}
static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (write)
		panic_print_deprecated();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static const struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
	{
		.procname = "oops_all_cpu_backtrace",
		.data = &sysctl_oops_all_cpu_backtrace,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
#endif
	{
		.procname = "tainted",
		.maxlen = sizeof(long),
		.mode = 0644,
		.proc_handler = proc_taint,
	},
	{
		.procname = "panic",
		.data = &panic_timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "panic_on_oops",
		.data = &panic_on_oops,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "panic_print",
		.data = &panic_print,
		.maxlen = sizeof(unsigned long),
		.mode = 0644,
		.proc_handler = sysctl_panic_print_handler,
	},
	{
		.procname = "panic_on_warn",
		.data = &panic_on_warn,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "warn_limit",
		.data = &warn_limit,
		.maxlen = sizeof(warn_limit),
		.mode = 0644,
		.proc_handler = proc_douintvec,
	},
#if (defined(CONFIG_X86_32) || defined(CONFIG_PARISC)) && \
	defined(CONFIG_DEBUG_STACKOVERFLOW)
	{
		.procname = "panic_on_stackoverflow",
		.data = &sysctl_panic_on_stackoverflow,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
#endif
	{
		.procname = "panic_sys_info",
		.data = &panic_print,
		.maxlen = sizeof(panic_print),
		.mode = 0644,
		.proc_handler = sysctl_sys_info_handler,
	},
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif
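/*
 * Illustrative usage sketch (not part of this file): with the table above
 * registered under "kernel", these knobs appear under /proc/sys/kernel/,
 * e.g. from userspace:
 *
 *	echo 1  > /proc/sys/kernel/panic_on_oops	# escalate an oops to panic
 *	echo 30 > /proc/sys/kernel/panic		# then reboot 30s after panic
 */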
/* The format is "panic_sys_info=tasks,mem,locks,ftrace,..." */
static int __init setup_panic_sys_info(char *buf)
{
	/* There is no risk of race in kernel boot phase */
	panic_print = sys_info_parse_param(buf);
	return 1;
}
__setup("panic_sys_info=", setup_panic_sys_info);

static atomic_t warn_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
}

static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);

static __init int kernel_panic_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_panic_sysfs_init);
#endif

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak __noreturn panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
atomic_t panic_redirect_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
static char *panic_force_buf;

static int __init panic_force_cpu_setup(char *str)
{
	int cpu;

	if (!str)
		return -EINVAL;

	if (kstrtoint(str, 0, &cpu) || cpu < 0 || cpu >= nr_cpu_ids) {
		pr_warn("panic_force_cpu: invalid value '%s'\n", str);
		return -EINVAL;
	}

	panic_force_cpu = cpu;
	return 0;
}
early_param("panic_force_cpu", panic_force_cpu_setup);

static int __init panic_force_cpu_late_init(void)
{
	if (panic_force_cpu < 0)
		return 0;

	panic_force_buf = kmalloc(PANIC_MSG_BUFSZ, GFP_KERNEL);

	return 0;
}
late_initcall(panic_force_cpu_late_init);

static void do_panic_on_target_cpu(void *info)
{
	panic("%s", (char *)info);
}

/**
 * panic_smp_redirect_cpu - Redirect panic to target CPU
 * @target_cpu: CPU that should handle the panic
 * @msg: formatted panic message
 *
 * Default implementation uses IPI. Architectures with NMI support
 * can override this for more reliable delivery.
 *
 * Return: 0 on success, negative errno on failure
 */
int __weak panic_smp_redirect_cpu(int target_cpu, void *msg)
{
	static call_single_data_t panic_csd;

	panic_csd.func = do_panic_on_target_cpu;
	panic_csd.info = msg;

	return smp_call_function_single_async(target_cpu, &panic_csd);
}
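/*
 * Illustrative sketch only: an architecture with an NMI-based IPI could
 * override the weak default above so the redirect still reaches a target
 * CPU that has interrupts disabled. The helper name below is hypothetical:
 *
 *	int panic_smp_redirect_cpu(int target_cpu, void *msg)
 *	{
 *		return arch_send_panic_nmi(target_cpu, msg); // hypothetical arch hook
 *	}
 */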
/**
 * panic_try_force_cpu - Redirect panic to a specific CPU for crash kernel
 * @fmt: panic message format string
 * @args: arguments for format string
 *
 * Some platforms require panic handling to occur on a specific CPU
 * for the crash kernel to function correctly. This function redirects
 * panic handling to the CPU specified via the panic_force_cpu= boot parameter.
 *
 * Returns false if panic should proceed on the current CPU.
 * Returns true if the panic was redirected.
 */
__printf(1, 0)
static bool panic_try_force_cpu(const char *fmt, va_list args)
{
	int this_cpu = raw_smp_processor_id();
	int old_cpu = PANIC_CPU_INVALID;
	const char *msg;

	/* Feature not enabled via boot parameter */
	if (panic_force_cpu < 0)
		return false;

	/* Already on target CPU - proceed normally */
	if (this_cpu == panic_force_cpu)
		return false;

	/* Target CPU is offline, can't redirect */
	if (!cpu_online(panic_force_cpu)) {
		pr_warn("panic: target CPU %d is offline, continuing on CPU %d\n",
			panic_force_cpu, this_cpu);
		return false;
	}

	/* Another panic already in progress */
	if (panic_in_progress())
		return false;

	/*
	 * Only one CPU can do the redirect. Use atomic cmpxchg to ensure
	 * we don't race with another CPU also trying to redirect.
	 */
	if (!atomic_try_cmpxchg(&panic_redirect_cpu, &old_cpu, this_cpu))
		return false;

	/*
	 * Use the dynamically allocated buffer if available, otherwise fall
	 * back to a static message for early boot panics or allocation failure.
	 */
	if (panic_force_buf) {
		vsnprintf(panic_force_buf, PANIC_MSG_BUFSZ, fmt, args);
		msg = panic_force_buf;
	} else {
		msg = "Redirected panic (buffer unavailable)";
	}

	console_verbose();
	bust_spinlocks(1);

	pr_emerg("panic: Redirecting from CPU %d to CPU %d for crash kernel.\n",
		 this_cpu, panic_force_cpu);

	/* Dump the original CPU's stack before redirecting */
	if (!test_taint(TAINT_DIE) &&
	    oops_in_progress <= 1 &&
	    IS_ENABLED(CONFIG_DEBUG_BUGVERBOSE)) {
		dump_stack();
	}

	if (panic_smp_redirect_cpu(panic_force_cpu, (void *)msg) != 0) {
		atomic_set(&panic_redirect_cpu, PANIC_CPU_INVALID);
		pr_warn("panic: failed to redirect to CPU %d, continuing on CPU %d\n",
			panic_force_cpu, this_cpu);
		return false;
	}

	/* IPI/NMI sent, this CPU should stop */
	return true;
}
#else
__printf(1, 0)
static inline bool panic_try_force_cpu(const char *fmt, va_list args)
{
	return false;
}
#endif /* CONFIG_SMP && CONFIG_CRASH_DUMP */
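/*
 * Illustrative (assumed) usage: booting with "panic_force_cpu=0" on such a
 * platform makes a panic raised on any other CPU get redirected to CPU 0
 * before the crash kernel is entered.
 */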
bool panic_try_start(void)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic(). Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other. To exclude them, we use panic_cpu here too.
	 */
	old_cpu = PANIC_CPU_INVALID;
	this_cpu = raw_smp_processor_id();

	return atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu);
}
EXPORT_SYMBOL(panic_try_start);

void panic_reset(void)
{
	atomic_set(&panic_cpu, PANIC_CPU_INVALID);
}
EXPORT_SYMBOL(panic_reset);

bool panic_in_progress(void)
{
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}
EXPORT_SYMBOL(panic_in_progress);
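/*
 * Minimal usage sketch (illustrative) of the helpers above, modeled on the
 * crash_kexec() pattern referenced in the comment: claim panic ownership,
 * do the work, then release it if we return.
 *
 *	if (panic_try_start()) {
 *		__crash_kexec(regs);
 *		panic_reset();
 *	}
 */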
/* Return true if a panic is in progress on the current CPU. */
bool panic_on_this_cpu(void)
{
	/*
	 * We can use raw_smp_processor_id() here because it is impossible for
	 * the task to be migrated to the panic_cpu, or away from it. If
	 * panic_cpu has already been set, and we're not currently executing on
	 * that CPU, then we never will be.
	 */
	return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
}
EXPORT_SYMBOL(panic_on_this_cpu);

/*
 * Return true if a panic is in progress on a remote CPU.
 *
 * On true, the local CPU should immediately release any printing resources
 * that may be needed by the panic CPU.
 */
bool panic_on_other_cpu(void)
{
	return (panic_in_progress() && !panic_on_this_cpu());
}
EXPORT_SYMBOL(panic_on_other_cpu);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	if (panic_try_start())
		panic("%s", msg);
	else if (panic_on_other_cpu())
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

void check_panic_on_warn(const char *origin)
{
	unsigned int limit;

	if (panic_on_warn)
		panic("%s: panic_on_warn set ...\n", origin);

	limit = READ_ONCE(warn_limit);
	if (atomic_inc_return(&warn_count) >= limit && limit)
		panic("%s: system warned too often (kernel.warn_limit is %d)",
		      origin, limit);
}

static void panic_trigger_all_cpu_backtrace(void)
{
	/* Temporarily allow non-panic CPUs to write their backtraces. */
	panic_triggering_all_cpu_backtrace = true;

	if (panic_this_cpu_backtrace_printed)
		trigger_allbutcpu_cpu_backtrace(raw_smp_processor_id());
	else
		trigger_all_cpu_backtrace();

	panic_triggering_all_cpu_backtrace = false;
}

/*
 * Helper that triggers the NMI backtrace (if set in panic_print)
 * and then performs the secondary CPUs' shutdown - we cannot have
 * the NMI backtrace after the CPUs are off!
 */
static void panic_other_cpus_shutdown(bool crash_kexec)
{
	if (panic_print & SYS_INFO_ALL_BT)
		panic_trigger_all_cpu_backtrace();

	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately may not be hardened to work in a panic
	 * situation. If we want to do crash dump after notifier calls
	 * and kmsg_dump, we will need architecture dependent extra
	 * bits in addition to stopping other CPUs, hence we rely on
	 * crash_smp_send_stop() for that.
	 */
	if (!crash_kexec)
		smp_send_stop();
	else
		crash_smp_send_stop();
}

/**
 * vpanic - halt the system
 * @fmt: The text string to print
 * @args: Arguments for the format string
 *
 * Display a message, then perform cleanups. This function never returns.
 */
void vpanic(const char *fmt, va_list args)
{
	static char buf[PANIC_MSG_BUFSZ];
	long i, i_next = 0, len;
	int state = 0;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/* Redirect panic to the target CPU if configured via panic_force_cpu=. */
	if (panic_try_force_cpu(fmt, args)) {
		/*
		 * Mark ourselves offline so panic_other_cpus_shutdown() won't
		 * wait for us on architectures that check num_online_cpus().
		 */
		set_cpu_online(smp_processor_id(), false);
		panic_smp_self_stop();
	}

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * cmpxchg success means this is the 1st CPU which comes here,
	 * so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	/* atomic_try_cmpxchg updates old_cpu on failure */
	if (panic_try_start()) {
		/* go ahead */
	} else if (panic_on_other_cpu())
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	len = vscnprintf(buf, sizeof(buf), fmt, args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);

	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (atomic_read(&panic_redirect_cpu) != PANIC_CPU_INVALID &&
	    panic_force_cpu == raw_smp_processor_id()) {
		pr_emerg("panic: Redirected from CPU %d, skipping stack dump.\n",
			 atomic_read(&panic_redirect_cpu));
	} else if (test_taint(TAINT_DIE) || oops_in_progress > 1) {
		panic_this_cpu_backtrace_printed = true;
	} else if (IS_ENABLED(CONFIG_DEBUG_BUGVERBOSE)) {
		dump_stack();
		panic_this_cpu_backtrace_printed = true;
	}

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);

	printk_legacy_allow_panic_sync();

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	sys_info(panic_print);

	kmsg_dump_desc(KMSG_DUMP_PANIC, buf);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dump kmsg before kdump.
	 * Note: since some panic_notifiers can make the crashed kernel
	 * more unstable, this can also increase the risk of kdump failure.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	if ((panic_print & SYS_INFO_PANIC_CONSOLE_REPLAY) ||
	    panic_console_replay)
		console_flush_on_panic(CONSOLE_REPLAY_ALL);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;

	/*
	 * The final messages may not have been printed if in a context that
	 * defers printing (such as NMI) and irq_work is not available.
	 * Explicitly flush the kernel log buffer one last time.
	 */
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
	nbcon_atomic_flush_unsafe();

	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}
EXPORT_SYMBOL(vpanic);

/* Identical to vpanic(), except it takes variadic arguments instead of a va_list */
void panic(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vpanic(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(panic);

#define TAINT_FLAG(taint, _c_true, _c_false)		\
	[ TAINT_##taint ] = {				\
		.c_true = _c_true, .c_false = _c_false,	\
		.desc = #taint,				\
	}

/*
 * NOTE: if you modify the taint_flags or TAINT_FLAGS_COUNT,
 * please also modify tools/debugging/kernel-chktaint and
 * Documentation/admin-guide/tainted-kernels.rst, including its
 * small shell script that prints the TAINT_FLAGS_COUNT bits of
 * /proc/sys/kernel/tainted.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	TAINT_FLAG(PROPRIETARY_MODULE, 'P', 'G'),
	TAINT_FLAG(FORCED_MODULE, 'F', ' '),
	TAINT_FLAG(CPU_OUT_OF_SPEC, 'S', ' '),
	TAINT_FLAG(FORCED_RMMOD, 'R', ' '),
	TAINT_FLAG(MACHINE_CHECK, 'M', ' '),
	TAINT_FLAG(BAD_PAGE, 'B', ' '),
	TAINT_FLAG(USER, 'U', ' '),
	TAINT_FLAG(DIE, 'D', ' '),
	TAINT_FLAG(OVERRIDDEN_ACPI_TABLE, 'A', ' '),
	TAINT_FLAG(WARN, 'W', ' '),
	TAINT_FLAG(CRAP, 'C', ' '),
	TAINT_FLAG(FIRMWARE_WORKAROUND, 'I', ' '),
	TAINT_FLAG(OOT_MODULE, 'O', ' '),
	TAINT_FLAG(UNSIGNED_MODULE, 'E', ' '),
	TAINT_FLAG(SOFTLOCKUP, 'L', ' '),
	TAINT_FLAG(LIVEPATCH, 'K', ' '),
	TAINT_FLAG(AUX, 'X', ' '),
	TAINT_FLAG(RANDSTRUCT, 'T', ' '),
	TAINT_FLAG(TEST, 'N', ' '),
	TAINT_FLAG(FWCTL, 'J', ' '),
};

#undef TAINT_FLAG

static void print_tainted_seq(struct seq_buf *s, bool verbose)
{
	const char *sep = "";
	int i;

	if (!tainted_mask) {
		seq_buf_puts(s, "Not tainted");
		return;
	}

	seq_buf_printf(s, "Tainted: ");
	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		const struct taint_flag *t = &taint_flags[i];
		bool is_set = test_bit(i, &tainted_mask);
		char c = is_set ? t->c_true : t->c_false;

		if (verbose) {
			if (is_set) {
				seq_buf_printf(s, "%s[%c]=%s", sep, c, t->desc);
				sep = ", ";
			}
		} else {
			seq_buf_putc(s, c);
		}
	}
}

static const char *_print_tainted(bool verbose)
{
	/* FIXME: what should the size be? */
	static char buf[sizeof(taint_flags)];
	struct seq_buf s;

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	seq_buf_init(&s, buf, sizeof(buf));

	print_tainted_seq(&s, verbose);

	return seq_buf_str(&s);
}

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NUL terminated.
 */
const char *print_tainted(void)
{
	return _print_tainted(false);
}

/**
 * print_tainted_verbose - A more verbose version of print_tainted()
 */
const char *print_tainted_verbose(void)
{
	return _print_tainted(true);
}
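/*
 * Example output (illustrative), for a kernel tainted only by TAINT_WARN
 * (bit 9): the non-verbose form emits one character per flag, so it reads
 * "Tainted: G        W" followed by blanks for the remaining clear flags,
 * while print_tainted_verbose() yields "Tainted: [W]=WARN".
 */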
int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint - add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy...
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	nbcon_cpu_emergency_enter();
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	nbcon_cpu_emergency_exit();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	nbcon_cpu_emergency_enter();

	disable_trace_on_warning();

	if (file) {
		pr_warn("WARNING: %s:%d at %pS, CPU#%d: %s/%d\n",
			file, line, caller,
			raw_smp_processor_id(), current->comm, current->pid);
	} else {
		pr_warn("WARNING: at %pS, CPU#%d: %s/%d\n",
			caller,
			raw_smp_processor_id(), current->comm, current->pid);
	}

#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
	if (args)
		vprintk(args->fmt, args->args);
#pragma GCC diagnostic pop

	print_modules();

	if (regs)
		show_regs(regs);

	check_panic_on_warn("kernel");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);

	nbcon_cpu_emergency_exit();
}

#ifdef CONFIG_BUG
#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		warn_rcu_exit(rcu);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	unsigned long flags;

	instrumentation_begin();
	flags = user_access_save();

	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));

	user_access_restore(flags);
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
core_param(panic_console_replay, panic_console_replay, bool, 0644);

static int panic_print_set(const char *val, const struct kernel_param *kp)
{
	panic_print_deprecated();
	return param_set_ulong(val, kp);
}

static int panic_print_get(char *val, const struct kernel_param *kp)
{
	return param_get_ulong(val, kp);
}

static const struct kernel_param_ops panic_print_ops = {
	.set = panic_print_set,
	.get = panic_print_get,
};
__core_param_cb(panic_print, &panic_print_ops, &panic_print, 0644);

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%s\n",
		panic_on_taint, str_enabled_disabled(panic_on_taint_nousertaint));

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);
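/*
 * Illustrative boot-time example (assumed, following the hex parsing above):
 * "panic_on_taint=0x200,nousertaint" panics once TAINT_WARN (bit 9) is set,
 * while rejecting taint written through the "tainted" sysctl that would
 * otherwise trigger it.
 */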