/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
#include <linux/random.h>
#include <linux/kallsyms.h>

int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

static long no_blink(long time)
{
        return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(long time);
EXPORT_SYMBOL(panic_blink);

/**
 *      panic - halt the system
 *      @fmt: The text string to print
 *
 *      Display a message, then perform cleanups.
 *
 *      This function never returns.
 */
NORET_TYPE void panic(const char *fmt, ...)
{
        long i;
        static char buf[1024];
        va_list args;
#if defined(CONFIG_S390)
        unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif

        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         */
        preempt_disable();

        bust_spinlocks(1);
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
        bust_spinlocks(0);

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * Do we want to call this before we try to display a message?
         */
        crash_kexec(NULL);

#ifdef CONFIG_SMP
        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();
#endif

        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        if (!panic_blink)
                panic_blink = no_blink;

        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
                printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
                for (i = 0; i < panic_timeout * 1000; ) {
                        touch_nmi_watchdog();
                        i += panic_blink(i);
                        mdelay(1);
                        i++;
                }
                /*
                 * This will not be a clean reboot, with everything shutting
                 * down.  But if there is a chance of rebooting the system
                 * it will be rebooted.
                 */
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;
                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        disabled_wait(caller);
#endif
        local_irq_enable();
        for (i = 0;;) {
                touch_softlockup_watchdog();
                i += panic_blink(i);
                mdelay(1);
                i++;
        }
}

EXPORT_SYMBOL(panic);
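
/*
 * Usage sketch (illustrative only, not part of this file): callers pass
 * a printf-style message describing the fatal condition.  The root-mount
 * failure path in init/do_mounts.c, for instance, does roughly:
 *
 *      panic("VFS: Unable to mount root fs on %s", b);
 *
 * Control never returns: the machine either reboots panic_timeout
 * seconds later or spins forever in the blink loop above.
 */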

struct tnt {
        u8      bit;
        char    true;
        char    false;
};

static const struct tnt tnts[] = {
        { TAINT_PROPRIETARY_MODULE,     'P', 'G' },
        { TAINT_FORCED_MODULE,          'F', ' ' },
        { TAINT_UNSAFE_SMP,             'S', ' ' },
        { TAINT_FORCED_RMMOD,           'R', ' ' },
        { TAINT_MACHINE_CHECK,          'M', ' ' },
        { TAINT_BAD_PAGE,               'B', ' ' },
        { TAINT_USER,                   'U', ' ' },
        { TAINT_DIE,                    'D', ' ' },
        { TAINT_OVERRIDDEN_ACPI_TABLE,  'A', ' ' },
        { TAINT_WARN,                   'W', ' ' },
        { TAINT_CRAP,                   'C', ' ' },
};

/**
 *      print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - Modules from drivers/staging are loaded.
 *
 *      The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
        static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < ARRAY_SIZE(tnts); i++) {
                        const struct tnt *t = &tnts[i];
                        *s++ = test_bit(t->bit, &tainted_mask) ?
                                        t->true : t->false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");

        return buf;
}

int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}

void add_taint(unsigned flag)
{
        debug_locks = 0; /* can't trust the integrity of the kernel anymore */
        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
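
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * that detects an untrusted condition sets a taint bit, and later
 * reports embed the flag string:
 *
 *      add_taint(TAINT_CRAP);
 *      printk(KERN_INFO "%s\n", print_tainted());
 *
 * With only that bit set, this prints something like
 * "Tainted: G         C" - 'G' because no proprietary module is loaded,
 * 'C' for staging code, and blanks for the untriggered flags.
 */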

static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time, then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the
 * display, too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        debug_locks_off(); /* can't trust the integrity of the kernel anymore */
        do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));

        return 0;
}
late_initcall(init_oops_id);

static void print_oops_end_marker(void)
{
        init_oops_id();
        printk(KERN_WARNING "---[ end trace %016llx ]---\n",
                (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
}
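
/*
 * Usage sketch (illustrative only, not part of this file): an
 * architecture's die() path typically brackets its register and
 * backtrace dump with these hooks, roughly:
 *
 *      oops_enter();
 *      ... dump registers and stack trace ...
 *      oops_exit();
 *      if (panic_on_oops)
 *              panic("Fatal exception");
 */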
342 { 343 va_list args; 344 char function[KSYM_SYMBOL_LEN]; 345 unsigned long caller = (unsigned long)__builtin_return_address(0); 346 sprint_symbol(function, caller); 347 348 printk(KERN_WARNING "------------[ cut here ]------------\n"); 349 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, 350 line, function); 351 va_start(args, fmt); 352 vprintk(fmt, args); 353 va_end(args); 354 355 print_modules(); 356 dump_stack(); 357 print_oops_end_marker(); 358 add_taint(TAINT_WARN); 359 } 360 EXPORT_SYMBOL(warn_slowpath); 361 #endif 362 363 #ifdef CONFIG_CC_STACKPROTECTOR 364 /* 365 * Called when gcc's -fstack-protector feature is used, and 366 * gcc detects corruption of the on-stack canary value 367 */ 368 void __stack_chk_fail(void) 369 { 370 panic("stack-protector: Kernel stack is corrupted"); 371 } 372 EXPORT_SYMBOL(__stack_chk_fail); 373 #endif 374 375 core_param(panic, panic_timeout, int, 0644); 376 core_param(pause_on_oops, pause_on_oops, int, 0644); 377