xref: /linux/arch/s390/kernel/unwind_bc.c (revision b4ada0618eed0fbd1b1630f73deb048c592b06a1)
// SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/export.h>
4 #include <linux/sched.h>
5 #include <linux/sched/task.h>
6 #include <linux/sched/task_stack.h>
7 #include <linux/interrupt.h>
8 #include <asm/sections.h>
9 #include <asm/ptrace.h>
10 #include <asm/bitops.h>
11 #include <asm/stacktrace.h>
12 #include <asm/unwind.h>
13 
14 unsigned long unwind_get_return_address(struct unwind_state *state)
15 {
16 	if (unwind_done(state))
17 		return 0;
18 	return __kernel_text_address(state->ip) ? state->ip : 0;
19 }
20 EXPORT_SYMBOL_GPL(unwind_get_return_address);
21 
22 static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
23 {
24 	return (sp <= state->sp) ||
25 		(sp > state->stack_info.end - sizeof(struct stack_frame));
26 }
27 
28 static bool update_stack_info(struct unwind_state *state, unsigned long sp)
29 {
30 	struct stack_info *info = &state->stack_info;
31 	unsigned long *mask = &state->stack_mask;
32 
33 	/* New stack pointer leaves the current stack */
34 	if (get_stack_info(sp, state->task, info, mask) != 0 ||
35 	    !on_stack(info, sp, sizeof(struct stack_frame)))
36 		/* 'sp' does not point to a valid stack */
37 		return false;
38 	return true;
39 }
40 
41 static inline bool is_final_pt_regs(struct unwind_state *state,
42 				    struct pt_regs *regs)
43 {
44 	/* user mode or kernel thread pt_regs at the bottom of task stack */
45 	if (task_pt_regs(state->task) == regs)
46 		return true;
47 
48 	/* user mode pt_regs at the bottom of irq stack */
49 	return state->stack_info.type == STACK_TYPE_IRQ &&
50 	       state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs &&
51 	       READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
52 }
53 
/* Avoid KMSAN false positives from touching uninitialized frames. */
__no_kmsan_checks
bool unwind_next_frame(struct unwind_state *state)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp, ip;
	bool reliable;

	regs = state->regs;
	if (unlikely(regs)) {
		/*
		 * Previous step stopped at a pt_regs area: resume from the
		 * interrupted context's frame, taking the return address
		 * from the saved %r14 (gprs[8]).
		 */
		sp = state->sp;
		sf = (struct stack_frame *) sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
		reliable = false;
		regs = NULL;
		/* skip bogus %r14 or if is the same as regs->psw.addr */
		if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
			/* drop this frame and retry with plain back-chain walking */
			state->regs = NULL;
			return unwind_next_frame(state);
		}
	} else {
		sf = (struct stack_frame *) state->sp;
		sp = READ_ONCE_NOCHECK(sf->back_chain);
		if (likely(sp)) {
			/* Non-zero back-chain points to the previous frame */
			if (unlikely(outside_of_stack(state, sp))) {
				/* back-chain crosses a stack boundary; re-anchor */
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			sf = (struct stack_frame *) sp;
			ip = READ_ONCE_NOCHECK(sf->gprs[8]);
			reliable = true;
		} else {
			/* No back-chain, look for a pt_regs structure */
			sp = state->sp + STACK_FRAME_OVERHEAD;
			if (!on_stack(info, sp, sizeof(struct pt_regs)))
				goto out_err;
			regs = (struct pt_regs *) sp;
			if (is_final_pt_regs(state, regs))
				goto out_stop;	/* bottom of the call chain, not an error */
			ip = READ_ONCE_NOCHECK(regs->psw.addr);
			sp = READ_ONCE_NOCHECK(regs->gprs[15]);
			if (unlikely(outside_of_stack(state, sp))) {
				/* interrupted context lives on a different stack */
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			reliable = true;
		}
	}

	/* Sanity check: ABI requires SP to be aligned 8 bytes. */
	if (sp & 0x7)
		goto out_err;

	/* Update unwind state */
	state->sp = sp;
	state->regs = regs;
	state->reliable = reliable;
	state->ip = unwind_recover_ret_addr(state, ip);
	return true;

out_err:
	state->error = true;
	/* fallthrough: an error also terminates the unwind */
out_stop:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
124 
/* Avoid KMSAN false positives from touching uninitialized frames. */
__no_kmsan_checks
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long first_frame)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	unsigned long ip, sp;

	memset(state, 0, sizeof(*state));
	state->task = task;
	state->regs = regs;

	/* Don't even attempt to start from user mode regs: */
	if (regs && user_mode(regs)) {
		info->type = STACK_TYPE_UNKNOWN;
		return;
	}

	/* Get the instruction pointer from pt_regs or the stack frame */
	if (regs) {
		ip = regs->psw.addr;
		sp = regs->gprs[15];
	} else if (task == current) {
		/* unwinding ourselves: start from our own current frame */
		sp = current_frame_address();
	} else {
		/* sleeping task: its kernel stack pointer was saved at switch */
		sp = task->thread.ksp;
	}

	/* Get current stack pointer and initialize stack info */
	if (!update_stack_info(state, sp)) {
		/* Something is wrong with the stack pointer */
		info->type = STACK_TYPE_UNKNOWN;
		state->error = true;
		return;
	}

	if (!regs) {
		/* Stack frame is within valid stack */
		/* no pt_regs: take the return address from the frame's %r14 */
		sf = (struct stack_frame *)sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
	}

	/* Update unwind state */
	state->sp = sp;
	state->reliable = true;
	state->ip = unwind_recover_ret_addr(state, ip);

	if (!first_frame)
		return;
	/* Skip through the call chain to the specified starting frame */
	while (!unwind_done(state)) {
		if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) {
			/* stop once we have reached or passed first_frame */
			if (state->sp >= first_frame)
				break;
		}
		unwind_next_frame(state);
	}
}
EXPORT_SYMBOL_GPL(__unwind_start);
185