/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

#define STACKSLOTS_PER_LINE 8
#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static unsigned int code_bytes = 64;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}

static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
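
/*
 * Walk one kernel stack.  Every word on the stack that looks like a
 * kernel text address is reported through ops->address().  When frame
 * pointer information is available, the word at bp + sizeof(long) is
 * the return address saved just above the frame pointer, so it is
 * reported as reliable and the walk follows frame->next_frame; any
 * other text-looking word is reported as unreliable (printed with a
 * '?' prefix) since it may just be a stale value left on the stack.
 * With bp == 0 (no frame pointer information) no such distinction can
 * be made and everything is reported as-is.
 */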
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
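
/*
 * Dump the call trace, possibly crossing several stacks.  On 32-bit,
 * each stack is THREAD_SIZE-aligned, so masking the current stack
 * pointer with ~(THREAD_SIZE - 1) yields the thread_info at its base.
 * thread_info->previous_esp (filled in by the 32-bit IRQ/softirq
 * stack-switching code) then points back to the stack that was
 * interrupted, and the walk continues there until previous_esp is
 * NULL, reporting each crossing through ops->stack(data, "IRQ").
 */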
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	for (;;) {
		struct thread_info *context;

		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		bp = print_context_stack(context, stack, bp, ops, data, NULL);

		stack = (unsigned long *)context->previous_esp;
		if (!stack)
			break;
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk(data);
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk(data);
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			printk("\n%s", log_lvl);
		printk(" %08lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		get_bp(bp);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;

	print_modules();
	__show_regs(regs, 0);

	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
		current_thread_info(), current, task_thread_info(current));
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode_vm(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_EMERG "Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp,
				0, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad EIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (ip < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	return ud2 == 0x0b0f;
}

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
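
/*
 * die_lock serializes oops output across CPUs.  die_owner remembers
 * which CPU currently holds the lock so that a recursive die() on the
 * same CPU (an oops triggered while printing an oops) does not try to
 * take it again and deadlock; die_nest_count tracks that recursion so
 * die() can suppress output after a few nested failures.
 */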
unsigned __kprobes long oops_begin(void)
{
	unsigned long flags;

	oops_enter();

	if (die_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_local_irq_save(flags);
		__raw_spin_lock(&die_lock);
		die_owner = smp_processor_id();
		die_nest_count = 0;
		bust_spinlocks(1);
	} else {
		raw_local_irq_save(flags);
	}
	die_nest_count++;
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	unsigned short ss;
	unsigned long sp;

	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	sp = (unsigned long) (&regs->sp);
	savesegment(ss, ss);
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
	return 0;
}

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (die_nest_count < 3) {
		report_bug(regs->ip, regs);

		if (__die(str, regs, err))
			regs = NULL;
	} else {
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
	}

	oops_end(flags, regs, SIGSEGV);
}

static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in the kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);
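
/*
 * Summary of the command-line parameters handled above:
 *
 *   oops=panic    - panic instead of just oopsing and killing the task
 *   kstack=N      - number of stack words show_stack_log_lvl() prints
 *   code_bytes=N  - bytes of code dumped around EIP in the "Code:" line
 *                   (capped at 8192)
 */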