xref: /linux/arch/x86/kernel/dumpstack_64.c (revision 871d3779cba18b028e34d0d2f6cc6caae76a97b6)
1 /*
2  *  Copyright (C) 1991, 1992  Linus Torvalds
3  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4  */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/kexec.h>
14 #include <linux/bug.h>
15 #include <linux/nmi.h>
16 #include <linux/sysfs.h>
17 
18 #include <asm/stacktrace.h>
19 
20 #define STACKSLOTS_PER_LINE 4
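/* Read the current frame pointer straight out of %rbp. */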
21 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
22 
23 int panic_on_unrecovered_nmi;
24 int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
25 static unsigned int code_bytes = 64;
26 static int die_counter;
27 
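/*
 * Print one stack entry: the raw address followed by its symbol.  Entries
 * that could not be verified against the frame-pointer chain are prefixed
 * with "? " to mark them as unreliable.
 */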
28 void printk_address(unsigned long address, int reliable)
29 {
30 	printk(" [<%p>] %s%pS\n", (void *) address,
31 			reliable ? "" : "? ", (void *) address);
32 }
33 
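/*
 * If 'stack' lies in one of this CPU's IST exception stacks, return that
 * stack's end and set *idp to its name ("#DB", "NMI", ...).  *usedp keeps
 * track of the exception stacks already visited, so a corrupted trace
 * cannot loop through the same stack twice.  Returns NULL if 'stack' is
 * not on any exception stack.
 */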
34 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
35 					unsigned *usedp, char **idp)
36 {
37 	static char ids[][8] = {
38 		[DEBUG_STACK - 1] = "#DB",
39 		[NMI_STACK - 1] = "NMI",
40 		[DOUBLEFAULT_STACK - 1] = "#DF",
41 		[STACKFAULT_STACK - 1] = "#SS",
42 		[MCE_STACK - 1] = "#MC",
43 #if DEBUG_STKSZ > EXCEPTION_STKSZ
44 		[N_EXCEPTION_STACKS ...
45 			N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
46 #endif
47 	};
48 	unsigned k;
49 
50 	/*
51 	 * Iterate over all exception stacks, and figure out whether
52 	 * 'stack' is in one of them:
53 	 */
54 	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
55 		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
56 		/*
57 		 * Is 'stack' at or above this exception frame's end?
58 		 * If yes then skip to the next frame.
59 		 */
60 		if (stack >= end)
61 			continue;
62 		/*
63 		 * Is 'stack' at or above this exception frame's start address?
64 		 * If yes then we found the right frame.
65 		 */
66 		if (stack >= end - EXCEPTION_STKSZ) {
67 			/*
68 			 * Make sure we only iterate through an exception
69 			 * stack once. If it comes up for the second time
70 			 * then there's something wrong going on - just
71 			 * break out and return NULL:
72 			 */
73 			if (*usedp & (1U << k))
74 				break;
75 			*usedp |= 1U << k;
76 			*idp = ids[k];
77 			return (unsigned long *)end;
78 		}
79 		/*
80 		 * If this is a debug stack, and if it has a larger size than
81 		 * the usual exception stacks, then 'stack' might still
82 		 * be within the lower portion of the debug stack:
83 		 */
84 #if DEBUG_STKSZ > EXCEPTION_STKSZ
85 		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
86 			unsigned j = N_EXCEPTION_STACKS - 1;
87 
88 			/*
89 			 * Black magic. A large debug stack is composed of
90 			 * multiple exception stack entries, which we
91 			 * iterate through now. Don't look:
92 			 */
93 			do {
94 				++j;
95 				end -= EXCEPTION_STKSZ;
96 				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
97 			} while (stack < end - EXCEPTION_STKSZ);
98 			if (*usedp & (1U << j))
99 				break;
100 			*usedp |= 1U << j;
101 			*idp = ids[j];
102 			return (unsigned long *)end;
103 		}
104 #endif
105 	}
106 	return NULL;
107 }
108 
109 /*
110  * x86-64 can have up to three kernel stacks:
111  * process stack
112  * interrupt stack
113  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
114  */
115 
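/*
 * Does 'p' (with room for 'size' bytes) lie on the stack currently being
 * walked?  That is either the THREAD_SIZE window ending at 'end' when an
 * exception/IRQ stack end is given, or the task's own THREAD_SIZE stack
 * area starting at its thread_info otherwise.
 */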
116 static inline int valid_stack_ptr(struct thread_info *tinfo,
117 			void *p, unsigned int size, void *end)
118 {
119 	void *t = tinfo;
120 	if (end) {
121 		if (p < end && p >= (end-THREAD_SIZE))
122 			return 1;
123 		else
124 			return 0;
125 	}
126 	return p > t && p < t + THREAD_SIZE - size;
127 }
128 
129 /* The form of the top of the frame on the stack */
130 struct stack_frame {
131 	struct stack_frame *next_frame;
132 	unsigned long return_address;
133 };
134 
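/*
 * Walk one stack region and hand every word that is a kernel text address
 * to ops->address().  Words sitting exactly where the frame-pointer chain
 * expects a return address are flagged reliable; the rest are flagged as
 * guesses, unless no frame pointer is available at all (bp == 0).  Returns
 * the frame pointer to continue with on the next stack.
 */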
135 static inline unsigned long
136 print_context_stack(struct thread_info *tinfo,
137 		unsigned long *stack, unsigned long bp,
138 		const struct stacktrace_ops *ops, void *data,
139 		unsigned long *end)
140 {
141 	struct stack_frame *frame = (struct stack_frame *)bp;
142 
143 	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
144 		unsigned long addr;
145 
146 		addr = *stack;
147 		if (__kernel_text_address(addr)) {
148 			if ((unsigned long) stack == bp + sizeof(long)) {
149 				ops->address(data, addr, 1);
150 				frame = frame->next_frame;
151 				bp = (unsigned long) frame;
152 			} else {
153 				ops->address(data, addr, bp == 0);
154 			}
155 		}
156 		stack++;
157 	}
158 	return bp;
159 }
160 
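/*
 * Generic stack walker: start on whatever stack 'stack' points into, walk
 * through any exception stacks and the per-CPU IRQ stack it chains to via
 * the link words stored near their tops, and finish on the process stack,
 * feeding every entry to the caller-supplied ops.
 */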
161 void dump_trace(struct task_struct *task, struct pt_regs *regs,
162 		unsigned long *stack, unsigned long bp,
163 		const struct stacktrace_ops *ops, void *data)
164 {
165 	const unsigned cpu = get_cpu();
166 	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
167 	unsigned used = 0;
168 	struct thread_info *tinfo;
169 
170 	if (!task)
171 		task = current;
172 
173 	if (!stack) {
174 		unsigned long dummy;
175 		stack = &dummy;
176 		if (task && task != current)
177 			stack = (unsigned long *)task->thread.sp;
178 	}
179 
180 #ifdef CONFIG_FRAME_POINTER
181 	if (!bp) {
182 		if (task == current) {
183 			/* Grab bp right from our regs */
184 			get_bp(bp);
185 		} else {
186 			/* bp is the last reg pushed by switch_to */
187 			bp = *(unsigned long *) task->thread.sp;
188 		}
189 	}
190 #endif
191 
192 	/*
193 	 * Print function call entries in all stacks, starting at the
194 	 * current stack address. If the stacks consist of nested
195 	 * exceptions, follow the links near the top of each stack to the next one:
196 	 */
197 	tinfo = task_thread_info(task);
198 	for (;;) {
199 		char *id;
200 		unsigned long *estack_end;
201 		estack_end = in_exception_stack(cpu, (unsigned long)stack,
202 						&used, &id);
203 
204 		if (estack_end) {
205 			if (ops->stack(data, id) < 0)
206 				break;
207 
208 			bp = print_context_stack(tinfo, stack, bp, ops,
209 							data, estack_end);
210 			ops->stack(data, "<EOE>");
211 			/*
212 			 * We link to the next stack via the
213 			 * second-to-last pointer (index -2 to end) in the
214 			 * exception stack:
215 			 */
216 			stack = (unsigned long *) estack_end[-2];
217 			continue;
218 		}
219 		if (irqstack_end) {
220 			unsigned long *irqstack;
221 			irqstack = irqstack_end -
222 				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
223 
224 			if (stack >= irqstack && stack < irqstack_end) {
225 				if (ops->stack(data, "IRQ") < 0)
226 					break;
227 				bp = print_context_stack(tinfo, stack, bp,
228 						ops, data, irqstack_end);
229 				/*
230 				 * We link to the next stack (which would
231 				 * normally be the process stack) via the last
232 				 * pointer (index -1 to end) in the IRQ stack:
233 				 */
234 				stack = (unsigned long *) (irqstack_end[-1]);
235 				irqstack_end = NULL;
236 				ops->stack(data, "EOI");
237 				continue;
238 			}
239 		}
240 		break;
241 	}
242 
243 	/*
244 	 * This handles the process stack:
245 	 */
246 	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
247 	put_cpu();
248 }
249 EXPORT_SYMBOL(dump_trace);
250 
251 static void
252 print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
253 {
254 	printk("%s", (char *)data);
255 	print_symbol(msg, symbol);
256 	printk("\n");
257 }
258 
259 static void print_trace_warning(void *data, char *msg)
260 {
261 	printk("%s%s\n", (char *)data, msg);
262 }
263 
264 static int print_trace_stack(void *data, char *name)
265 {
266 	printk("%s <%s> ", (char *)data, name);
267 	return 0;
268 }
269 
270 /*
271  * Print one address/symbol entry per line.
272  */
273 static void print_trace_address(void *data, unsigned long addr, int reliable)
274 {
275 	touch_nmi_watchdog();
276 	printk("%s", (char *)data);
277 	printk_address(addr, reliable);
278 }
279 
280 static const struct stacktrace_ops print_trace_ops = {
281 	.warning = print_trace_warning,
282 	.warning_symbol = print_trace_warning_symbol,
283 	.stack = print_trace_stack,
284 	.address = print_trace_address,
285 };
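
/*
 * dump_trace() is not tied to the printing ops above: callers can pass
 * their own stacktrace_ops and consume addresses instead of printing them
 * (save_stack_trace() is built this way).  A minimal, hypothetical sketch;
 * only .stack and .address are invoked by dump_trace() itself, and both
 * must be non-NULL:
 *
 *	static int nop_stack(void *data, char *name)
 *	{
 *		return 0;
 *	}
 *
 *	static void count_address(void *data, unsigned long addr, int reliable)
 *	{
 *		if (reliable)
 *			++*(unsigned int *)data;
 *	}
 *
 *	static const struct stacktrace_ops count_ops = {
 *		.stack		= nop_stack,
 *		.address	= count_address,
 *	};
 *
 *	unsigned int nr_reliable = 0;
 *	dump_trace(current, NULL, NULL, 0, &count_ops, &nr_reliable);
 */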
286 
287 static void
288 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
289 		unsigned long *stack, unsigned long bp, char *log_lvl)
290 {
291 	printk("%sCall Trace:\n", log_lvl);
292 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
293 }
294 
295 void show_trace(struct task_struct *task, struct pt_regs *regs,
296 		unsigned long *stack, unsigned long bp)
297 {
298 	show_trace_log_lvl(task, regs, stack, bp, "");
299 }
300 
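/*
 * Dump the raw stack contents, up to kstack_depth_to_print words, hopping
 * from the IRQ stack to the process stack at the "<EOI>" marker, and then
 * print the decoded call trace underneath.
 */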
301 static void
302 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
303 		unsigned long *sp, unsigned long bp, char *log_lvl)
304 {
305 	unsigned long *stack;
306 	int i;
307 	const int cpu = smp_processor_id();
308 	unsigned long *irqstack_end =
309 		(unsigned long *) (cpu_pda(cpu)->irqstackptr);
310 	unsigned long *irqstack =
311 		(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
312 
313 	/*
314 	 * debugging aid: "show_stack(NULL, NULL);" prints the
315 	 * back trace for this cpu.
316 	 */
317 
318 	if (sp == NULL) {
319 		if (task)
320 			sp = (unsigned long *)task->thread.sp;
321 		else
322 			sp = (unsigned long *)&sp;
323 	}
324 
325 	stack = sp;
326 	for (i = 0; i < kstack_depth_to_print; i++) {
327 		if (stack >= irqstack && stack <= irqstack_end) {
328 			if (stack == irqstack_end) {
329 				stack = (unsigned long *) (irqstack_end[-1]);
330 				printk(" <EOI> ");
331 			}
332 		} else {
333 			if (((long) stack & (THREAD_SIZE-1)) == 0)
334 				break;
335 		}
336 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
337 			printk("\n%s", log_lvl);
338 		printk(" %016lx", *stack++);
339 		touch_nmi_watchdog();
340 	}
341 	printk("\n");
342 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
343 }
344 
345 void show_stack(struct task_struct *task, unsigned long *sp)
346 {
347 	show_stack_log_lvl(task, NULL, sp, 0, "");
348 }
349 
350 /*
351  * The architecture-independent dump_stack generator
352  */
353 void dump_stack(void)
354 {
355 	unsigned long bp = 0;
356 	unsigned long stack;
357 
358 #ifdef CONFIG_FRAME_POINTER
359 	if (!bp)
360 		get_bp(bp);
361 #endif
362 
363 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
364 		current->pid, current->comm, print_tainted(),
365 		init_utsname()->release,
366 		(int)strcspn(init_utsname()->version, " "),
367 		init_utsname()->version);
368 	show_trace(NULL, NULL, &stack, bp);
369 }
370 EXPORT_SYMBOL(dump_stack);
371 
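/*
 * Full register dump for an oops.  For faults taken in kernel mode this
 * also dumps the stack contents and the code bytes around the faulting
 * instruction.
 */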
372 void show_registers(struct pt_regs *regs)
373 {
374 	int i;
375 	unsigned long sp;
376 	const int cpu = smp_processor_id();
377 	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
378 
379 	sp = regs->sp;
380 	printk("CPU %d ", cpu);
381 	__show_regs(regs, 1);
382 	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
383 		cur->comm, cur->pid, task_thread_info(cur), cur);
384 
385 	/*
386 	 * When in-kernel, we also print out the stack and code at the
387 	 * time of the fault..
388 	 */
389 	if (!user_mode(regs)) {
390 		unsigned int code_prologue = code_bytes * 43 / 64;
391 		unsigned int code_len = code_bytes;
392 		unsigned char c;
393 		u8 *ip;
394 
395 		printk(KERN_EMERG "Stack:\n");
396 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
397 				regs->bp, KERN_EMERG);
398 
399 		printk(KERN_EMERG "Code: ");
400 
401 		ip = (u8 *)regs->ip - code_prologue;
402 		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
403 			/* try starting at IP */
404 			ip = (u8 *)regs->ip;
405 			code_len = code_len - code_prologue + 1;
406 		}
407 		for (i = 0; i < code_len; i++, ip++) {
408 			if (ip < (u8 *)PAGE_OFFSET ||
409 					probe_kernel_address(ip, c)) {
410 				printk(" Bad RIP value.");
411 				break;
412 			}
413 			if (ip == (u8 *)regs->ip)
414 				printk("<%02x> ", c);
415 			else
416 				printk("%02x ", c);
417 		}
418 	}
419 	printk("\n");
420 }
421 
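/*
 * A BUG() site is a two-byte ud2 instruction (0x0f 0x0b).  Reading it as a
 * little-endian u16 therefore yields 0x0b0f.
 */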
422 int is_valid_bugaddr(unsigned long ip)
423 {
424 	unsigned short ud2;
425 
426 	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
427 		return 0;
428 
429 	return ud2 == 0x0b0f;
430 }
431 
432 static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
433 static int die_owner = -1;
434 static unsigned int die_nest_count;
435 
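/*
 * Serialize oops output: disable interrupts and take die_lock, letting the
 * CPU that already owns the lock recurse so a nested oops can still get
 * its message out.  Returns the saved flags for oops_end().
 */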
436 unsigned __kprobes long oops_begin(void)
437 {
438 	int cpu;
439 	unsigned long flags;
440 
441 	oops_enter();
442 
443 	/* racy, but better than risking deadlock. */
444 	raw_local_irq_save(flags);
445 	cpu = smp_processor_id();
446 	if (!__raw_spin_trylock(&die_lock)) {
447 		if (cpu == die_owner)
448 			/* nested oops. should stop eventually */;
449 		else
450 			__raw_spin_lock(&die_lock);
451 	}
452 	die_nest_count++;
453 	die_owner = cpu;
454 	console_verbose();
455 	bust_spinlocks(1);
456 	return flags;
457 }
458 
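/*
 * Counterpart to oops_begin(): kexec into a crash kernel if one is loaded,
 * drop die_lock once the nest count reaches zero, then either return
 * (signr == 0), panic (in interrupt context or with panic_on_oops), or
 * kill the offending task with 'signr'.
 */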
459 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
460 {
461 	if (regs && kexec_should_crash(current))
462 		crash_kexec(regs);
463 
464 	bust_spinlocks(0);
465 	die_owner = -1;
466 	add_taint(TAINT_DIE);
467 	die_nest_count--;
468 	if (!die_nest_count)
469 		/* Nest count reaches zero, release the lock. */
470 		__raw_spin_unlock(&die_lock);
471 	raw_local_irq_restore(flags);
472 	oops_exit();
473 
474 	if (!signr)
475 		return;
476 	if (in_interrupt())
477 		panic("Fatal exception in interrupt");
478 	if (panic_on_oops)
479 		panic("Fatal exception");
480 	do_exit(signr);
481 }
482 
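/*
 * Emit the oops header (description, error code, die counter, config
 * flags), let the die notifier chain veto the report, then dump the
 * registers plus a one-line RIP/RSP summary in case the rest scrolls
 * away.  Returns nonzero if a notifier handled the event and no signal
 * should be sent.
 */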
483 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
484 {
485 	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
486 #ifdef CONFIG_PREEMPT
487 	printk("PREEMPT ");
488 #endif
489 #ifdef CONFIG_SMP
490 	printk("SMP ");
491 #endif
492 #ifdef CONFIG_DEBUG_PAGEALLOC
493 	printk("DEBUG_PAGEALLOC");
494 #endif
495 	printk("\n");
496 	sysfs_printk_last_file();
497 	if (notify_die(DIE_OOPS, str, regs, err,
498 			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
499 		return 1;
500 
501 	show_registers(regs);
502 	/* Executive summary in case the oops scrolled away */
503 	printk(KERN_ALERT "RIP ");
504 	printk_address(regs->ip, 1);
505 	printk(" RSP <%016lx>\n", regs->sp);
506 	return 0;
507 }
508 
509 /*
510  * This path is taken when something in the kernel has done something bad
511  * and the offending task is about to be terminated:
512  */
513 void die(const char *str, struct pt_regs *regs, long err)
514 {
515 	unsigned long flags = oops_begin();
516 	int sig = SIGSEGV;
517 
518 	if (!user_mode_vm(regs))
519 		report_bug(regs->ip, regs);
520 
521 	if (__die(str, regs, err))
522 		sig = 0;
523 	oops_end(flags, regs, sig);
524 }
525 
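/*
 * Fatal NMI path (e.g. the NMI watchdog catching a lockup): dump what we
 * can, panic if requested, and kill the interrupted task.  We are in NMI
 * context here, hence the explicit nmi_exit() before do_exit().
 */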
526 void notrace __kprobes
527 die_nmi(char *str, struct pt_regs *regs, int do_panic)
528 {
529 	unsigned long flags;
530 
531 	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
532 		return;
533 
534 	/*
535 	 * We are in trouble anyway, let's at least try
536 	 * to get a message out.
537 	 */
538 	flags = oops_begin();
539 	printk(KERN_EMERG "%s", str);
540 	printk(" on CPU%d, ip %08lx, registers:\n",
541 		smp_processor_id(), regs->ip);
542 	show_registers(regs);
543 	oops_end(flags, regs, 0);
544 	if (do_panic || panic_on_oops)
545 		panic("Non maskable interrupt");
546 	nmi_exit();
547 	local_irq_enable();
548 	do_exit(SIGBUS);
549 }
550 
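/*
 * Boot-time parameters handled below:
 *   oops=panic      panic instead of just killing the offending task
 *   kstack=N        number of raw stack words show_stack() prints
 *   code_bytes=N    bytes of code dumped around RIP (clamped to 8192)
 *
 * Hypothetical example command line:
 *   ... oops=panic kstack=64 code_bytes=128
 */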
551 static int __init oops_setup(char *s)
552 {
553 	if (!s)
554 		return -EINVAL;
555 	if (!strcmp(s, "panic"))
556 		panic_on_oops = 1;
557 	return 0;
558 }
559 early_param("oops", oops_setup);
560 
561 static int __init kstack_setup(char *s)
562 {
563 	if (!s)
564 		return -EINVAL;
565 	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
566 	return 0;
567 }
568 early_param("kstack", kstack_setup);
569 
570 static int __init code_bytes_setup(char *s)
571 {
572 	code_bytes = simple_strtoul(s, NULL, 0);
573 	if (code_bytes > 8192)
574 		code_bytes = 8192;
575 
576 	return 1;
577 }
578 __setup("code_bytes=", code_bytes_setup);
579