xref: /linux/arch/x86/kernel/dumpstack_32.c (revision ae87221d3ce49d9de1e43756da834fd0bf05a2ad)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>

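/*
 * STACKSLOTS_PER_LINE is how many stack words are printed per line of a
 * raw stack dump; get_bp() reads the current frame pointer from %ebp.
 */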
#define STACKSLOTS_PER_LINE 8
#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static unsigned int code_bytes = 64;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
        printk(" [<%p>] %s%pS\n", (void *) address,
                        reliable ? "" : "? ", (void *) address);
}

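/*
 * Check whether a stack slot may be dereferenced: it must lie within the
 * region ending at 'end' (when one is given), or within the thread_info's
 * own stack with room left for 'size' bytes.
 */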
static inline int valid_stack_ptr(struct thread_info *tinfo,
                        void *p, unsigned int size, void *end)
{
        void *t = tinfo;
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
                else
                        return 0;
        }
        return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};

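/*
 * Scan one stack region word by word.  Every value that looks like a
 * kernel text address is reported via ops->address(); entries that match
 * the saved frame-pointer chain are marked reliable and advance bp to the
 * next frame.  Returns the bp value reached at the end of the region.
 */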
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + sizeof(long)) {
                                ops->address(data, addr, 1);
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, bp == 0);
                        }
                }
                stack++;
        }
        return bp;
}

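/*
 * Generic stack walker: pick a starting stack pointer and frame pointer
 * for 'task' (or the current task), then walk each stack in turn,
 * following thread_info->previous_esp from e.g. the IRQ stack back to the
 * process stack until no previous stack remains.
 */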
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!bp) {
                if (task == current) {
                        /* Grab bp right from our regs */
                        get_bp(bp);
                } else {
                        /* bp is the last reg pushed by switch_to */
                        bp = *(unsigned long *) task->thread.sp;
                }
        }
#endif

        for (;;) {
                struct thread_info *context;

                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                bp = print_context_stack(context, stack, bp, ops, data, NULL);

                stack = (unsigned long *)context->previous_esp;
                if (!stack)
                        break;
                if (ops->stack(data, "IRQ") < 0)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk(data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        printk("%s <%s> ", (char *)data, name);
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
        touch_nmi_watchdog();
        printk(data);
        printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp, char *log_lvl)
{
        printk("%sCall Trace:\n", log_lvl);
        dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp)
{
        show_trace_log_lvl(task, regs, stack, bp, "");
}

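/*
 * Dump the raw stack contents: up to kstack_depth_to_print words,
 * STACKSLOTS_PER_LINE per line, followed by the call trace.
 */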
static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *sp, unsigned long bp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
                        printk("\n%s", log_lvl);
                printk(" %08lx", *stack++);
                touch_nmi_watchdog();
        }
        printk("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long bp = 0;
        unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
        if (!bp)
                get_bp(bp);
#endif

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        show_trace(NULL, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;

        print_modules();
        __show_regs(regs, 0);

        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
                TASK_COMM_LEN, current->comm, task_pid_nr(current),
                current_thread_info(), current, task_thread_info(current));
        /*
         * When in kernel mode, we also print out the stack and code at the
         * time of the fault.
         */
        if (!user_mode_vm(regs)) {
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;

                printk(KERN_EMERG "Stack:\n");
                show_stack_log_lvl(NULL, regs, &regs->sp,
                                0, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

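                /*
                 * Dump code_bytes bytes around the faulting instruction:
                 * roughly two thirds (code_bytes * 43 / 64) before regs->ip
                 * and the remainder after it.  If the prologue is not a
                 * readable kernel address, fall back to starting at ip.
                 */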
                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at IP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

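/*
 * report_bug() uses this to check that a trapping instruction really is
 * the ud2 opcode (0x0f 0x0b, read here as the little-endian word 0x0b0f)
 * at a kernel address.
 */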
int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

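/*
 * oops_begin()/oops_end() serialize oops output across CPUs with die_lock.
 * The CPU that already owns the lock may re-enter (tracked by
 * die_nest_count) without deadlocking on its own lock.
 */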
unsigned __kprobes long oops_begin(void)
{
        unsigned long flags;

        oops_enter();

        if (die_owner != raw_smp_processor_id()) {
                console_verbose();
                raw_local_irq_save(flags);
                __raw_spin_lock(&die_lock);
                die_owner = smp_processor_id();
                die_nest_count = 0;
                bust_spinlocks(1);
        } else {
                raw_local_irq_save(flags);
        }
        die_nest_count++;
        return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE);
        __raw_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        oops_exit();
        do_exit(signr);
}

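/*
 * Print the oops banner (name, error code, die counter, config flags),
 * give the die notifier chain a chance to handle the event, then dump the
 * registers and a one-line EIP / SS:ESP summary in case the rest scrolls
 * away.  Returns non-zero if a notifier stopped further handling.
 */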
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        unsigned short ss;
        unsigned long sp;

        printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        sysfs_printk_last_file();
        if (notify_die(DIE_OOPS, str, regs, err,
                        current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
                return 1;

        show_registers(regs);
        /* Executive summary in case the oops scrolled away */
        sp = (unsigned long) (&regs->sp);
        savesegment(ss, ss);
        if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
        }
        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
        print_symbol("%s", regs->ip);
        printk(" SS:ESP %04x:%08lx\n", ss, sp);
        return 0;
}

/*
 * This path is taken when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        if (die_nest_count < 3) {
                report_bug(regs->ip, regs);

                if (__die(str, regs, err))
                        regs = NULL;
        } else {
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
        }

        oops_end(flags, regs, SIGSEGV);
}

static DEFINE_SPINLOCK(nmi_print_lock);

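/*
 * Fatal NMI handling: try to get a message and register dump out,
 * optionally panic, start a crash dump via crash_kexec() when the NMI hit
 * kernel mode, and kill the current task.
 */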
void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out:
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", str);
        printk(" on CPU%d, ip %08lx, registers:\n",
                smp_processor_id(), regs->ip);
        show_registers(regs);
        if (do_panic)
                panic("Non maskable interrupt");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /*
         * If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can:
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}

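/*
 * Boot-time parameters: "oops=panic" turns every oops into a panic,
 * "kstack=N" sets how many stack words are dumped, and "code_bytes=N"
 * (capped at 8192) sets how many code bytes are dumped around EIP.
 */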
static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
        if (!s)
                return -EINVAL;
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;

        return 1;
}
__setup("code_bytes=", code_bytes_setup);