xref: /linux/arch/x86/kernel/dumpstack_32.c (revision cb76c93982404273d746f3ccd5085b47689099a8)
1 /*
2  *  Copyright (C) 1991, 1992  Linus Torvalds
3  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4  */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/hardirq.h>
9 #include <linux/kdebug.h>
10 #include <linux/export.h>
11 #include <linux/ptrace.h>
12 #include <linux/kexec.h>
13 #include <linux/sysfs.h>
14 #include <linux/bug.h>
15 #include <linux/nmi.h>
16 
17 #include <asm/stacktrace.h>
18 
19 void stack_type_str(enum stack_type type, const char **begin, const char **end)
20 {
21 	switch (type) {
22 	case STACK_TYPE_IRQ:
23 	case STACK_TYPE_SOFTIRQ:
24 		*begin = "IRQ";
25 		*end   = "EOI";
26 		break;
27 	default:
28 		*begin = NULL;
29 		*end   = NULL;
30 	}
31 }
32 
33 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
34 {
35 	unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack);
36 	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
37 
38 	if (stack < begin || stack >= end)
39 		return false;
40 
41 	info->type	= STACK_TYPE_IRQ;
42 	info->begin	= begin;
43 	info->end	= end;
44 
45 	/*
46 	 * See irq_32.c -- the next stack pointer is stored at the beginning of
47 	 * the stack.
48 	 */
49 	info->next_sp	= (unsigned long *)*begin;
50 
51 	return true;
52 }
53 
54 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
55 {
56 	unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack);
57 	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
58 
59 	if (stack < begin || stack >= end)
60 		return false;
61 
62 	info->type	= STACK_TYPE_SOFTIRQ;
63 	info->begin	= begin;
64 	info->end	= end;
65 
66 	/*
67 	 * The next stack pointer is stored at the beginning of the stack.
68 	 * See irq_32.c.
69 	 */
70 	info->next_sp	= (unsigned long *)*begin;
71 
72 	return true;
73 }
74 
75 int get_stack_info(unsigned long *stack, struct task_struct *task,
76 		   struct stack_info *info, unsigned long *visit_mask)
77 {
78 	if (!stack)
79 		goto unknown;
80 
81 	task = task ? : current;
82 
83 	if (in_task_stack(stack, task, info))
84 		return 0;
85 
86 	if (task != current)
87 		goto unknown;
88 
89 	if (in_hardirq_stack(stack, info))
90 		return 0;
91 
92 	if (in_softirq_stack(stack, info))
93 		return 0;
94 
95 unknown:
96 	info->type = STACK_TYPE_UNKNOWN;
97 	return -EINVAL;
98 }
99 
100 void dump_trace(struct task_struct *task, struct pt_regs *regs,
101 		unsigned long *stack, unsigned long bp,
102 		const struct stacktrace_ops *ops, void *data)
103 {
104 	unsigned long visit_mask = 0;
105 	int graph = 0;
106 
107 	task = task ? : current;
108 	stack = stack ? : get_stack_pointer(task, regs);
109 	bp = bp ? : (unsigned long)get_frame_pointer(task, regs);
110 
111 	for (;;) {
112 		const char *begin_str, *end_str;
113 		struct stack_info info;
114 
115 		if (get_stack_info(stack, task, &info, &visit_mask))
116 			break;
117 
118 		stack_type_str(info.type, &begin_str, &end_str);
119 
120 		if (begin_str && ops->stack(data, begin_str) < 0)
121 			break;
122 
123 		bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
124 
125 		if (end_str && ops->stack(data, end_str) < 0)
126 			break;
127 
128 		stack = info.next_sp;
129 
130 		touch_nmi_watchdog();
131 	}
132 }
133 EXPORT_SYMBOL(dump_trace);
134 
135 void
136 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
137 		   unsigned long *sp, unsigned long bp, char *log_lvl)
138 {
139 	unsigned long *stack;
140 	int i;
141 
142 	sp = sp ? : get_stack_pointer(task, regs);
143 
144 	stack = sp;
145 	for (i = 0; i < kstack_depth_to_print; i++) {
146 		if (kstack_end(stack))
147 			break;
148 		if ((i % STACKSLOTS_PER_LINE) == 0) {
149 			if (i != 0)
150 				pr_cont("\n");
151 			printk("%s %08lx", log_lvl, *stack++);
152 		} else
153 			pr_cont(" %08lx", *stack++);
154 		touch_nmi_watchdog();
155 	}
156 	pr_cont("\n");
157 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
158 }
159 
160 
/*
 * Print the register state; when the fault happened in kernel mode, also
 * dump the raw stack contents and a hexdump of the code bytes around the
 * faulting instruction.
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_EMERG);
	__show_regs(regs, !user_mode(regs));

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/*
		 * Dump code_bytes bytes total, with roughly two thirds
		 * (43/64) of them taken from before regs->ip.
		 */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		pr_emerg("Stack:\n");
		show_stack_log_lvl(NULL, regs, NULL, 0, KERN_EMERG);

		pr_emerg("Code:");

		/*
		 * If the prologue start is below the kernel mapping or
		 * unreadable, fall back to dumping from regs->ip itself.
		 */
		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont("  Bad EIP value.");
				break;
			}
			/* The faulting byte itself is marked with <..>. */
			if (ip == (u8 *)regs->ip)
				pr_cont(" <%02x>", c);
			else
				pr_cont(" %02x", c);
		}
	}
	pr_cont("\n");
}
203 
204 int is_valid_bugaddr(unsigned long ip)
205 {
206 	unsigned short ud2;
207 
208 	if (ip < PAGE_OFFSET)
209 		return 0;
210 	if (probe_kernel_address((unsigned short *)ip, ud2))
211 		return 0;
212 
213 	return ud2 == 0x0b0f;
214 }
215