xref: /linux/arch/s390/kernel/unwind_bc.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/bitops.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

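/*
 * Return the return address for the current unwind position, or 0 if
 * unwinding is done or the saved address does not point to kernel text.
 */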
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;
	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

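/*
 * A new stack pointer is outside the current stack if it does not move
 * towards the stack end (higher addresses) or if it leaves no room for a
 * complete stack frame before the end.
 */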
static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
{
	return (sp <= state->sp) ||
		(sp > state->stack_info.end - sizeof(struct stack_frame));
}

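/*
 * Switch state->stack_info to the stack that contains 'sp', e.g. when the
 * unwinder crosses from the irq stack back to the task stack. Returns
 * false if 'sp' does not point into any known stack.
 */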
static bool update_stack_info(struct unwind_state *state, unsigned long sp)
{
	struct stack_info *info = &state->stack_info;
	unsigned long *mask = &state->stack_mask;

	/* New stack pointer leaves the current stack */
	if (get_stack_info(sp, state->task, info, mask) != 0 ||
	    !on_stack(info, sp, sizeof(struct stack_frame)))
		/* 'sp' does not point to a valid stack */
		return false;
	return true;
}

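/*
 * Check whether 'regs' marks the end of the call chain; unwinding stops
 * gracefully at such a pt_regs instead of reporting an error.
 */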
static inline bool is_final_pt_regs(struct unwind_state *state,
				    struct pt_regs *regs)
{
	/* user mode or kernel thread pt_regs at the bottom of task stack */
	if (task_pt_regs(state->task) == regs)
		return true;

	/* user mode pt_regs at the bottom of irq stack */
	return state->stack_info.type == STACK_TYPE_IRQ &&
	       state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs &&
	       READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
}

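/*
 * Unwind one step. The kernel is built with -mbackchain, so a non-zero
 * back chain in the current stack frame points to the previous frame,
 * whose %r14 slot (gprs[8]) provides the return address. A zero back
 * chain ends the chain on this stack; the unwinder then expects a
 * struct pt_regs right above the frame, e.g. from interrupt or
 * exception entry, and continues on the interrupted context.
 */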
/* Avoid KMSAN false positives from touching uninitialized frames. */
__no_kmsan_checks
bool unwind_next_frame(struct unwind_state *state)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp, ip;
	bool reliable;

	regs = state->regs;
	if (unlikely(regs)) {
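		/*
		 * The previous step found a pt_regs. Also try the %r14
		 * slot of the frame the interrupted context was using;
		 * this is only a guess, so it is marked unreliable and
		 * dropped below if it is not kernel text or merely
		 * repeats the address already taken from the pt_regs.
		 */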
		sp = state->sp;
		sf = (struct stack_frame *) sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
		reliable = false;
		regs = NULL;
		/* skip a bogus %r14 or one that is the same as regs->psw.addr */
		if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
			state->regs = NULL;
			return unwind_next_frame(state);
		}
	} else {
		sf = (struct stack_frame *) state->sp;
		sp = READ_ONCE_NOCHECK(sf->back_chain);
		if (likely(sp)) {
			/* Non-zero back-chain points to the previous frame */
			if (unlikely(outside_of_stack(state, sp))) {
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			sf = (struct stack_frame *) sp;
			ip = READ_ONCE_NOCHECK(sf->gprs[8]);
			reliable = true;
		} else {
			/* No back-chain, look for a pt_regs structure */
			sp = state->sp + STACK_FRAME_OVERHEAD;
			if (!on_stack(info, sp, sizeof(struct pt_regs)))
				goto out_err;
			regs = (struct pt_regs *) sp;
			if (is_final_pt_regs(state, regs))
				goto out_stop;
			ip = READ_ONCE_NOCHECK(regs->psw.addr);
			sp = READ_ONCE_NOCHECK(regs->gprs[15]);
			if (unlikely(outside_of_stack(state, sp))) {
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			reliable = true;
		}
	}

	/* Sanity check: the ABI requires SP to be 8-byte aligned. */
	if (sp & 0x7)
		goto out_err;

	/* Update unwind state */
	state->sp = sp;
	state->regs = regs;
	state->reliable = reliable;
	state->ip = unwind_recover_ret_addr(state, ip);
	return true;

out_err:
	state->error = true;
out_stop:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

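/*
 * Initialize the unwind state: from the given pt_regs, from the current
 * frame when unwinding the current task, or from the saved kernel stack
 * pointer (thread.ksp) of a sleeping task. If a non-zero first_frame is
 * given, the state is advanced until that frame is reached.
 */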
/* Avoid KMSAN false positives from touching uninitialized frames. */
__no_kmsan_checks
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long first_frame)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	unsigned long ip, sp;

	memset(state, 0, sizeof(*state));
	state->task = task;
	state->regs = regs;

	/* Don't even attempt to start from user mode regs: */
	if (regs && user_mode(regs)) {
		info->type = STACK_TYPE_UNKNOWN;
		return;
	}

	/* Get the instruction pointer from pt_regs or the stack frame */
	if (regs) {
		ip = regs->psw.addr;
		sp = regs->gprs[15];
	} else if (task == current) {
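		/* Unwinding the current task: start from the unwinder's own frame */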
		sp = current_frame_address();
	} else {
		sp = task->thread.ksp;
	}

	/* Get current stack pointer and initialize stack info */
	if (!update_stack_info(state, sp)) {
		/* Something is wrong with the stack pointer */
		info->type = STACK_TYPE_UNKNOWN;
		state->error = true;
		return;
	}

	if (!regs) {
		/* Stack frame is within valid stack */
		sf = (struct stack_frame *)sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
	}

	/* Update unwind state */
	state->sp = sp;
	state->reliable = true;
	state->ip = unwind_recover_ret_addr(state, ip);

	if (!first_frame)
		return;
	/* Skip through the call chain to the specified starting frame */
	while (!unwind_done(state)) {
		if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) {
			if (state->sp >= first_frame)
				break;
		}
		unwind_next_frame(state);
	}
}
EXPORT_SYMBOL_GPL(__unwind_start);
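
/*
 * Typical usage (sketch): callers normally drive the unwinder through the
 * unwind_for_each_frame() helper from <asm/unwind.h> instead of calling
 * __unwind_start()/unwind_next_frame() directly, roughly:
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	unwind_for_each_frame(&state, task, regs, 0) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr)
 *			break;
 *		// record addr (see arch_stack_walk() in stacktrace.c)
 *	}
 */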