xref: /linux/arch/x86/kernel/dumpstack_64.c (revision a5766f11cfd3a0c03450d99c8fe548c2940be884)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

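/*
 * STACKSLOTS_PER_LINE sets how many stack words show_stack_log_lvl()
 * packs into each output line, and get_bp() pulls the live frame
 * pointer straight out of %rbp so a backtrace of the current context
 * can be started without a pt_regs snapshot.
 */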
#define STACKSLOTS_PER_LINE 4
#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static unsigned int code_bytes = 64;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ...
			N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				/* Patch the '?' in "#DB[?]" with the part number: */
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

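/*
 * valid_stack_ptr() checks a pointer either against an explicit
 * [end - THREAD_SIZE, end) window when 'end' is given (IRQ and
 * exception stacks), or against the THREAD_SIZE area above 'tinfo',
 * leaving room for a read of 'size' bytes at the top.
 */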
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

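/*
 * With CONFIG_FRAME_POINTER every prologue pushes %rbp and copies
 * %rsp into it, so each saved frame pointer points at a stack_frame:
 * the caller's %rbp followed by the return address.  A stack word
 * sitting exactly one long above the current bp is therefore a
 * reliable return address; everything else is only a guess.
 */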
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, follow the links that chain one stack to the next.
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = print_context_stack(tinfo, stack, bp, ops,
							data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
						ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

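/*
 * Default stacktrace_ops: these callbacks route dump_trace() output
 * straight to printk, with the log-level string threaded through as
 * 'data' so every line gets the caller's prefix.  A typical address
 * line comes out shaped like " [<ffffffff........>] ? symbol+0x.../0x...",
 * where "? " flags an address that is not on the frame pointer chain.
 */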
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk("%s", (char *)data);
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk("%s", (char *)data);
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end =
		(unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack =
		(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	/*
	 * debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu.
	 */

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			/* Process stacks are THREAD_SIZE aligned: stop at the top. */
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			printk("\n%s", log_lvl);
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;
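	/* The address of this on-stack local marks where we currently are. */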

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		get_bp(bp);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	__show_regs(regs, 1);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault:
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_EMERG "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				regs->bp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

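		/*
		 * Dump code_bytes bytes of code around regs->ip: about
		 * 43/64ths of them before the faulting instruction and
		 * the rest after, so the "<xx>" marker lands roughly two
		 * thirds of the way into the dump.
		 */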
		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

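	/* 0x0b0f is the two-byte ud2 opcode (0f 0b) read as a little-endian word. */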
	return ud2 == 0x0b0f;
}

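/*
 * die_lock serializes oops output across CPUs; die_owner and
 * die_nest_count let the owning CPU re-enter for a nested oops
 * instead of deadlocking on its own lock.
 */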
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
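	/* With a NULL regs the caller handles any killing itself: just leave oops context. */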
	if (!regs) {
		oops_exit();
		return;
	}
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}

notrace __kprobes void
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	flags = oops_begin();
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}

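/*
 * Boot-time knobs: "oops=panic" turns any oops into a panic,
 * "kstack=N" bounds how many raw stack words get dumped, and
 * "code_bytes=N" (clamped to 8192) sizes the Code: dump in
 * show_registers().
 */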
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);