xref: /linux/arch/x86/lib/retpoline.S (revision d27bb0246e5356dbef4d923e72c680bf893885a8)
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

	.section .text..__x86.indirect_thunk


.macro POLINE reg
	ANNOTATE_INTRA_FUNCTION_CALL
	call    .Ldo_rop_\@
	int3
.Ldo_rop_\@:
	mov     %\reg, (%_ASM_SP)
	UNWIND_HINT_FUNC
.endm

.macro RETPOLINE reg
	POLINE \reg
	RET
.endm
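
/*
 * For illustration, "RETPOLINE rax" expands to roughly:
 *
 *	call	1f		# push address of 1: and create an RSB
 *				# entry pointing at the INT3 below
 *	int3			# speculation trap
 * 1:	mov	%rax, (%rsp)	# replace the pushed return address with
 *				# the real branch target
 *	ret			# architecturally jump to *%rax
 *
 * A speculated return consumes the RSB entry and lands on the INT3,
 * while the architectural RET goes to the value in %rax.
 */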

.macro THUNK reg

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)

.endm
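
/*
 * For example, __x86_indirect_thunk_rax is patched at boot to one of
 * three bodies: the RETPOLINE call/mov/ret gadget above (the default),
 * "lfence; jmp *%rax; int3" when X86_FEATURE_RETPOLINE_LFENCE is
 * selected, or a plain "jmp *%rax" when retpolines are disabled
 * entirely (ALT_NOT(X86_FEATURE_RETPOLINE)).
 */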

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */
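
/*
 * Concretely (assuming the 64-bit register list in
 * <asm/GEN-for-each-reg.h>, which covers every GPR except %rsp): the
 * first include below expands GEN(rax) to "THUNK rax", emitting
 * __x86_indirect_thunk_rax, and the second include expands it to
 * __EXPORT_THUNK(__x86_indirect_thunk_rax).
 */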

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_CALL_DEPTH_TRACKING
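/*
 * Call thunks: like the plain indirect thunks above, but they first
 * account the call in the per-CPU call depth counter so that the
 * return thunk (__x86_return_skl below) knows when the RSB is about
 * to underflow. POLINE leaves the branch target on top of the stack;
 * the ANNOTATE_UNRET_SAFE ret that follows is what dispatches to it.
 */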
.macro CALL_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	CALL_DEPTH_ACCOUNT
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

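/*
 * Jump thunks: same dispatch as the call thunks, minus
 * CALL_DEPTH_ACCOUNT. A tail-called JMP does not push a return
 * address, so it must not be counted as a call.
 */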
.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif

/*
 * The __x86_return_thunk() name below is magical: it is what
 * -mfunction-return=thunk-extern makes the compiler generate JMPs to
 * in place of each RET.
 */
#ifdef CONFIG_RETHUNK

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 * and 20 in its virtual address are set (while those bits in the
 * srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
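
/*
 * In other words (an illustrative address, not the real one): with
 * bits 2, 8, 14 and 20 set, the two entry points differ by exactly
 * (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20) == 0x104104, so e.g.
 *
 *   srso_alias_untrain_ret = 0xffffffff82000000
 *   srso_alias_safe_ret    = 0xffffffff82104104
 *
 * and both still fall within the same 2M page.
 */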
#ifdef CONFIG_CPU_SRSO
	.section .text..__x86.rethunk_untrain

SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ASM_NOP2
	lfence
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)

	.section .text..__x86.rethunk_safe
#else
/* dummy definition for alternatives */
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
#endif

SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	lea 8(%_ASM_SP), %_ASM_SP
	UNWIND_HINT_FUNC
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_safe_ret)

	.section .text..__x86.return_thunk

SYM_CODE_START(srso_alias_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)
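
/*
 * The CALL above pushes a return address; srso_alias_safe_ret() pops
 * it again with its LEA before the RET, so architecturally this thunk
 * simply returns to the original caller. The UD2 is never reached
 * architecturally and stops straight-line speculation past the CALL.
 */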

/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, both the instruction sequence starting at srso_safe_ret() and
 * the respective instruction sequence at retbleed_return_thunk()
 * must start at a cacheline boundary.
 */

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64-byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
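
/*
 * Note how the .align/.skip pair below arranges for retbleed_return_thunk
 * itself to land exactly on the next 64-byte boundary: the padding is
 * 64 minus the distance from retbleed_untrain_ret to
 * retbleed_return_thunk, filled with INT3 (0xcc).
 */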
	.align 64
	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	/*
	 * As executed from retbleed_untrain_ret, this is:
	 *
	 *   TEST $0xcc, %bl
	 *   LFENCE
	 *   JMP retbleed_return_thunk
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6

	/*
	 * As executed from retbleed_return_thunk, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the type was
	 * not a branch) which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(retbleed_return_thunk)

	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp retbleed_return_thunk
	int3
SYM_FUNC_END(retbleed_untrain_ret)
__EXPORT_THUNK(retbleed_untrain_ret)
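
/*
 * Byte-level view of the overlap above, for illustration:
 *
 *   retbleed_untrain_ret:   f6 c3 cc	test $0xcc, %bl
 *   retbleed_return_thunk:     c3	ret
 *                                 cc	int3
 *
 * The same three bytes decode either as one TEST (entered at
 * retbleed_untrain_ret) or as RET; INT3 (entered one byte in, at
 * retbleed_return_thunk).
 */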

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed which is a
 *
 * movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
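
/*
 * Decoded byte by byte (the immediate is stored little-endian), the
 * same ten bytes read two ways:
 *
 *   srso_untrain_ret:  48 b8 48 8d 64 24 08 c3 cc cc
 *                      movabs $0xccccc30824648d48, %rax
 *
 *   srso_safe_ret:           48 8d 64 24 08		lea 0x8(%rsp),%rsp
 *                                           c3		ret
 *                                              cc cc	int3; int3
 */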
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below).  This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)

SYM_FUNC_START(entry_untrain_ret)
	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */

#ifdef CONFIG_CALL_DEPTH_TRACKING

	.align 64
SYM_FUNC_START(__x86_return_skl)
	ANNOTATE_NOENDBR
	/*
	 * Keep the hotpath in a 16-byte I-fetch for the non-debug
	 * case.
	 */
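	/*
	 * Rough sketch of the mechanism (see the call depth tracking
	 * comments in <asm/nospec-branch.h> for the authoritative
	 * description): each accounted CALL shifts the per-CPU depth
	 * value right by 5 (sign-extending, filling set bits from the
	 * top), and each return shifts it left by 5 here. Once more
	 * returns than accounted calls have executed, all set bits have
	 * been shifted out, the SHL yields zero, and we refill the RSB
	 * below instead of issuing a bare, potentially underflowing RET.
	 */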
	CALL_THUNKS_DEBUG_INC_RETS
	shlq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
	jz	1f
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	CALL_THUNKS_DEBUG_INC_STUFFS
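	/*
	 * Stuff the RSB: each of the 16 CALLs below pushes a return
	 * address and records an RSB entry pointing at the INT3 right
	 * after itself, so later mispredicted returns speculate into a
	 * harmless trap.
	 */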
	.rept	16
	ANNOTATE_INTRA_FUNCTION_CALL
	call	2f
	int3
2:
	.endr
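	/* Drop the 16 return addresses pushed by the CALLs above. */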
	add	$(8*16), %rsp

	CREDIT_CALL_DEPTH

	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */