/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/dmi.h>

int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

/* Returns how long it waited in ms */
long (*panic_blink)(long time);
EXPORT_SYMBOL(panic_blink);
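/*
 * Illustrative sketch (not part of the original file): code elsewhere in
 * the kernel can hook the panic path by registering on panic_notifier_list
 * with the generic atomic notifier API.  The names my_panic_event and
 * my_panic_nb below are hypothetical; the void pointer passed to the
 * callback is the formatted panic message built in panic().
 *
 *      static int my_panic_event(struct notifier_block *nb,
 *                                unsigned long event, void *buf)
 *      {
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_panic_nb = {
 *              .notifier_call = my_panic_event,
 *      };
 *
 *      atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */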
static void panic_blink_one_second(void)
{
        static long i = 0, end;

        if (panic_blink) {
                end = i + MSEC_PER_SEC;

                while (i < end) {
                        i += panic_blink(i);
                        mdelay(1);
                        i++;
                }
        } else {
                /*
                 * When running under a hypervisor a small mdelay may get
                 * rounded up to the hypervisor timeslice. For example, with
                 * a 1ms in 10ms hypervisor timeslice we might inflate a
                 * mdelay(1) loop by 10x.
                 *
                 * If we have nothing to blink, spin on 1 second calls to
                 * mdelay to avoid this.
                 */
                mdelay(MSEC_PER_SEC);
        }
}

/**
 *      panic - halt the system
 *      @fmt: The text string to print
 *
 *      Display a message, then perform cleanups.
 *
 *      This function never returns.
 */
NORET_TYPE void panic(const char *fmt, ...)
{
        static char buf[1024];
        va_list args;
        long i;

        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         */
        preempt_disable();

        console_verbose();
        bust_spinlocks(1);
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        dump_stack();
#endif

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * Do we want to call this before we try to display a message?
         */
        crash_kexec(NULL);

        kmsg_dump(KMSG_DUMP_PANIC);

        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();

        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        bust_spinlocks(0);

        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
                printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);

                for (i = 0; i < panic_timeout; i++) {
                        touch_nmi_watchdog();
                        panic_blink_one_second();
                }
                /*
                 * This will not be a clean reboot, with everything
                 * shutting down.  But if there is a chance of
                 * rebooting the system it will be rebooted.
                 */
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;
                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        {
                unsigned long caller;

                caller = (unsigned long)__builtin_return_address(0);
                disabled_wait(caller);
        }
#endif
        local_irq_enable();
        while (1) {
                touch_softlockup_watchdog();
                panic_blink_one_second();
        }
}

EXPORT_SYMBOL(panic);


struct tnt {
        u8      bit;
        char    true;
        char    false;
};

static const struct tnt tnts[] = {
        { TAINT_PROPRIETARY_MODULE,     'P', 'G' },
        { TAINT_FORCED_MODULE,          'F', ' ' },
        { TAINT_UNSAFE_SMP,             'S', ' ' },
        { TAINT_FORCED_RMMOD,           'R', ' ' },
        { TAINT_MACHINE_CHECK,          'M', ' ' },
        { TAINT_BAD_PAGE,               'B', ' ' },
        { TAINT_USER,                   'U', ' ' },
        { TAINT_DIE,                    'D', ' ' },
        { TAINT_OVERRIDDEN_ACPI_TABLE,  'A', ' ' },
        { TAINT_WARN,                   'W', ' ' },
        { TAINT_CRAP,                   'C', ' ' },
        { TAINT_FIRMWARE_WORKAROUND,    'I', ' ' },
};

/**
 *      print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - Modules from drivers/staging are loaded.
 *  'I' - Working around severe firmware bug.
 *
 *      The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
        static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < ARRAY_SIZE(tnts); i++) {
                        const struct tnt *t = &tnts[i];
                        *s++ = test_bit(t->bit, &tainted_mask) ?
                                        t->true : t->false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");

        return buf;
}

int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}

void add_taint(unsigned flag)
{
        /*
         * Can't trust the integrity of the kernel anymore.
         * We don't directly call debug_locks_off() because the issue
         * is not necessarily serious enough to set oops_in_progress to 1.
         * Also we want to keep up lockdep for the staging development and
         * post-warning cases.
         */
        if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
                printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");

        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
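/*
 * Usage sketch: callers flag the kernel as tainted with add_taint(), and
 * the taint letters then appear in oops/warning reports via print_tainted().
 * For example, after
 *
 *      add_taint(TAINT_MACHINE_CHECK);
 *
 * print_tainted() would return "Tainted: G   M" (plus trailing spaces --
 * one column per tnts[] entry, in table order, with the column's "false"
 * character shown for bits that are clear).
 */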
/* Spin for the given number of milliseconds, feeding the NMI watchdog. */
static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        tracing_off();
        /* can't trust the integrity of the kernel anymore: */
        debug_locks_off();
        do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));
        else
                oops_id++;

        return 0;
}
late_initcall(init_oops_id);

static void print_oops_end_marker(void)
{
        init_oops_id();
        printk(KERN_WARNING "---[ end trace %016llx ]---\n",
                (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
        kmsg_dump(KMSG_DUMP_OOPS);
}
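/*
 * The warn_slowpath_*() helpers below are the back end of the WARN_ON()
 * and WARN() macros: on architectures using the generic bug machinery,
 * WARN(cond, fmt...) expands to a warn_slowpath_fmt() call and a bare
 * WARN_ON(cond) to warn_slowpath_null().  Hypothetical caller-side example
 * (the refcount variable is illustrative only):
 *
 *      WARN(refcount < 0, "refcount went negative: %d\n", refcount);
 */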
#ifdef WANT_WARN_ON_SLOWPATH
struct slowpath_args {
        const char *fmt;
        va_list args;
};

static void warn_slowpath_common(const char *file, int line, void *caller,
                                 unsigned taint, struct slowpath_args *args)
{
        const char *board;

        printk(KERN_WARNING "------------[ cut here ]------------\n");
        printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
        board = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (board)
                printk(KERN_WARNING "Hardware name: %s\n", board);

        if (args)
                vprintk(args->fmt, args->args);

        print_modules();
        dump_stack();
        print_oops_end_marker();
        add_taint(taint);
}

void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
        struct slowpath_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        warn_slowpath_common(file, line, __builtin_return_address(0),
                             TAINT_WARN, &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
                             unsigned taint, const char *fmt, ...)
{
        struct slowpath_args args;

        args.fmt = fmt;
        va_start(args.args, fmt);
        warn_slowpath_common(file, line, __builtin_return_address(0),
                             taint, &args);
        va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
        warn_slowpath_common(file, line, __builtin_return_address(0),
                             TAINT_WARN, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %p\n",
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
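/*
 * Tuning sketch: both knobs above are core_param()s, so (assuming the
 * standard core_param behaviour) they can be set on the kernel command
 * line, e.g.
 *
 *      panic=30 pause_on_oops=5
 *
 * (reboot 30 seconds after a panic; pause 5 seconds around each oops),
 * and should also be writable at runtime under
 * /sys/module/kernel/parameters/.
 */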