xref: /linux/arch/s390/kernel/stacktrace.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

/*
 * Walk the kernel stack of @task (or the state described by @regs) and
 * hand each return address to @consume_entry until it returns false or
 * the unwinder runs out of frames.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
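
/*
 * Illustrative sketch (not part of the original file): a minimal
 * consume_entry callback in the style of the generic stacktrace code.
 * The example_* names are invented for this example.
 */
struct example_trace {
	unsigned long *entries;		/* caller-provided buffer */
	unsigned int nr;		/* entries stored so far */
	unsigned int max;		/* buffer capacity */
};

static bool example_consume(void *cookie, unsigned long addr)
{
	struct example_trace *et = cookie;

	if (et->nr >= et->max)
		return false;		/* buffer full: stop the walk */
	et->entries[et->nr++] = addr;
	return true;			/* continue with the next frame */
}

/* Usage: arch_stack_walk(example_consume, &et, current, NULL); */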

/*
 * Like arch_stack_walk(), but bail out with -EINVAL as soon as the
 * trace can no longer be considered reliable, for example when a frame
 * lies outside the task stack or contains saved registers.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stack traces that contain rethook trampoline
		 * frames as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}
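
/*
 * Illustrative sketch (not part of the original file): a caller is
 * expected to treat any non-zero return value as "trace not reliable".
 * example_save_reliable() reuses the example_* helpers from above and
 * is likewise invented for illustration.
 */
static int example_save_reliable(struct task_struct *task,
				 unsigned long *buf, unsigned int max)
{
	struct example_trace et = { .entries = buf, .max = max };
	int ret;

	ret = arch_stack_walk_reliable(example_consume, &et, task);
	if (ret)
		return ret;	/* -EINVAL: fall back to an unreliable trace */
	return et.nr;		/* number of addresses captured */
}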

/*
 * Store one instruction address either in a perf callchain or via the
 * generic consume_entry callback, depending on the caller.
 */
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine whether an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)		/* instructions are halfword aligned */
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}
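
/*
 * Note: in_range(val, start, len) from linux/minmax.h is true iff
 * start <= val < start + len, i.e. the check above tests whether ip
 * points into the mapped vdso text.
 */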

/*
 * Walk a 64-bit user space stack by following the ABI-mandated back
 * chain and report every return address found, either to perf or to a
 * generic consumer. Compat (31-bit) tasks are not supported.
 */
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			/* The return address is saved in gprs[8], i.e. %r14 */
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;	/* break, not return: pagefault_enable() must run */
		first = false;
	}
	pagefault_enable();
}
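
/*
 * For reference, a rough sketch of the user stack frame layout assumed
 * above (see struct stack_frame_user in asm/stacktrace.h for the
 * authoritative definition). The frame starts with the back chain
 * pointer, and the register save area holds %r6..%r15, so gprs[8] is
 * the saved return register %r14:
 *
 *	0(%sp):  back_chain   previous frame; 0 either ends the chain or
 *	                      marks the special VDSO frame handled above
 *	...      save areas
 *	gprs[8]: saved %r14   return address of the current function
 */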

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
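
/*
 * For comparison, a sketch of the expected perf-side caller, which
 * lives outside this file and likely looks along these lines:
 *
 *	void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 *				 struct pt_regs *regs)
 *	{
 *		arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 *	}
 */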

/*
 * return_address - return the n-th return address on the current
 * kernel stack, not counting the frame of return_address() itself;
 * return_address(0) thus behaves like __builtin_return_address(0)
 * in the caller. Returns 0 if the stack ends first.
 */
unsigned long return_address(unsigned int n)
{
	struct unwind_state state;
	unsigned long addr;

	/* Increment to skip current stack entry */
	n++;

	unwind_for_each_frame(&state, NULL, NULL, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (!n--)
			return addr;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(return_address);
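
/*
 * Illustrative sketch (not part of the original file):
 * example_show_caller() is invented for this example and prints the
 * call site it was invoked from.
 */
static void example_show_caller(void)
{
	/* %pS formats a kernel text address as symbol+offset */
	pr_info("called from %pS\n", (void *)return_address(0));
}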