/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>


int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;

static void printk_stack_address(unsigned long address, int reliable,
                                 void *data)
{
        printk("%s [<%p>] %s%pB\n",
               (char *)data, (void *)address,
               reliable ? "" : "? ", (void *)address);
}

void printk_address(unsigned long address)
{
        pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
                        struct thread_info *tinfo, int *graph)
{
        struct task_struct *task;
        unsigned long ret_addr;
        int index;

        if (addr != (unsigned long)return_to_handler)
                return;

        task = tinfo->task;
        index = task->curr_ret_stack;

        if (!task->ret_stack || index < *graph)
                return;

        index -= *graph;
        ret_addr = task->ret_stack[index].ret;

        ops->address(data, ret_addr, 1);

        (*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
                        struct thread_info *tinfo, int *graph)
{ }
#endif

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo,
                        void *p, unsigned int size, void *end)
{
        void *t = tinfo;
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
                else
                        return 0;
        }
        return p > t && p < t + THREAD_SIZE - size;
}

/*
 * Scan every word of the stack region, reporting each value that looks
 * like a kernel text address; values that sit on the frame-pointer
 * chain are marked reliable, everything else is a guess ("? " entries).
 */
unsigned long
print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + sizeof(long)) {
                                ops->address(data, addr, 1);
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, 0);
                        }
                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
                }
                stack++;
        }
        return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);

/*
 * Walk only the frame-pointer chain, so every reported address is a
 * real return address.  The walk stops as soon as the chain leaves the
 * stack, an address falls outside kernel text, or the ->address()
 * callback returns non-zero.
 */
unsigned long
print_context_stack_bp(struct thread_info *tinfo,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph)
{
        struct stack_frame *frame = (struct stack_frame *)bp;
        unsigned long *ret_addr = &frame->return_address;

        while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
                unsigned long addr = *ret_addr;

                if (!__kernel_text_address(addr))
                        break;

                if (ops->address(data, addr, 1))
                        break;
                frame = frame->next_frame;
                ret_addr = &frame->return_address;
                print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
        }

        return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
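
/*
 * Illustrative sketch (not part of this file): a minimal stacktrace_ops
 * consumer that collects return addresses into a buffer by driving the
 * frame-pointer walker above.  The save_ctx type and save_address()
 * helper are hypothetical names.  Note that only print_context_stack_bp()
 * honours a non-zero return from ->address() as a request to stop;
 * print_context_stack() ignores the return value.
 *
 *      struct save_ctx {
 *              unsigned long addrs[16];
 *              int n;
 *      };
 *
 *      static int save_address(void *data, unsigned long addr, int reliable)
 *      {
 *              struct save_ctx *ctx = data;
 *
 *              if (ctx->n >= ARRAY_SIZE(ctx->addrs))
 *                      return 1;       // non-zero stops the bp walker
 *              ctx->addrs[ctx->n++] = addr;
 *              return 0;
 *      }
 *
 *      static const struct stacktrace_ops save_trace_ops = {
 *              .address        = save_address,
 *              .walk_stack     = print_context_stack_bp,
 *      };
 *
 * Such an ops structure would then be handed to dump_trace(), exactly as
 * print_trace_ops is below.
 */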

static int print_trace_stack(void *data, char *name)
{
        printk("%s <%s> ", (char *)data, name);
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static int print_trace_address(void *data, unsigned long addr, int reliable)
{
        touch_nmi_watchdog();
        printk_stack_address(addr, reliable, data);
        return 0;
}

static const struct stacktrace_ops print_trace_ops = {
        .stack          = print_trace_stack,
        .address        = print_trace_address,
        .walk_stack     = print_context_stack,
};

void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp, char *log_lvl)
{
        printk("%sCall Trace:\n", log_lvl);
        dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp)
{
        show_trace_log_lvl(task, regs, stack, bp, "");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long bp = 0;
        unsigned long stack;

        /*
         * Stack frames below this one aren't interesting.  Don't show them
         * if we're printing for %current.
         */
        if (!sp && (!task || task == current)) {
                sp = &stack;
                bp = stack_frame(current, NULL);
        }

        show_stack_log_lvl(task, NULL, sp, bp, "");
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (!signr)
                return;
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
        unsigned short ss;
        unsigned long sp;
#endif
        printk(KERN_DEFAULT
               "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
               IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
               IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
               debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
               IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "");

        if (notify_die(DIE_OOPS, str, regs, err,
                        current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);
#ifdef CONFIG_X86_32
        if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
        }
        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
        print_symbol("%s", regs->ip);
        printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->ip);
        printk(" RSP <%016lx>\n", regs->sp);
#endif
        return 0;
}
NOKPROBE_SYMBOL(__die);
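
/*
 * For reference, the header line emitted by __die() above follows the
 * "%s: %04lx [#%d]%s%s%s%s" format.  With str == "general protection
 * fault", err == 0, the first oops on an SMP kernel, it would render as
 * (example values; the tail depends on the enabled config options):
 *
 *      general protection fault: 0000 [#1] SMP
 */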
" KASAN" : ""); 268 269 if (notify_die(DIE_OOPS, str, regs, err, 270 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) 271 return 1; 272 273 print_modules(); 274 show_regs(regs); 275 #ifdef CONFIG_X86_32 276 if (user_mode(regs)) { 277 sp = regs->sp; 278 ss = regs->ss & 0xffff; 279 } else { 280 sp = kernel_stack_pointer(regs); 281 savesegment(ss, ss); 282 } 283 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); 284 print_symbol("%s", regs->ip); 285 printk(" SS:ESP %04x:%08lx\n", ss, sp); 286 #else 287 /* Executive summary in case the oops scrolled away */ 288 printk(KERN_ALERT "RIP "); 289 printk_address(regs->ip); 290 printk(" RSP <%016lx>\n", regs->sp); 291 #endif 292 return 0; 293 } 294 NOKPROBE_SYMBOL(__die); 295 296 /* 297 * This is gone through when something in the kernel has done something bad 298 * and is about to be terminated: 299 */ 300 void die(const char *str, struct pt_regs *regs, long err) 301 { 302 unsigned long flags = oops_begin(); 303 int sig = SIGSEGV; 304 305 if (!user_mode(regs)) 306 report_bug(regs->ip, regs); 307 308 if (__die(str, regs, err)) 309 sig = 0; 310 oops_end(flags, regs, sig); 311 } 312 313 static int __init kstack_setup(char *s) 314 { 315 ssize_t ret; 316 unsigned long val; 317 318 if (!s) 319 return -EINVAL; 320 321 ret = kstrtoul(s, 0, &val); 322 if (ret) 323 return ret; 324 kstack_depth_to_print = val; 325 return 0; 326 } 327 early_param("kstack", kstack_setup); 328 329 static int __init code_bytes_setup(char *s) 330 { 331 ssize_t ret; 332 unsigned long val; 333 334 if (!s) 335 return -EINVAL; 336 337 ret = kstrtoul(s, 0, &val); 338 if (ret) 339 return ret; 340 341 code_bytes = val; 342 if (code_bytes > 8192) 343 code_bytes = 8192; 344 345 return 1; 346 } 347 __setup("code_bytes=", code_bytes_setup); 348