/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
 */

#include <linux/export.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>

	.code64
	.section .text, "ax"

#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE	(8+16*2)
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE	0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE		(FRAME_SIZE + MCOUNT_FRAME_SIZE)
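
/*
 * A sketch (for orientation, derived from save_mcount_regs below) of the
 * stack it builds, assuming CONFIG_FRAME_POINTER=y and added=0, from
 * higher to lower addresses:
 *
 *	traced function's return address (parent)
 *	fentry return address			<- MCOUNT_REG_SIZE(%rsp)
 *	original %rbp				<- MCOUNT_REG_SIZE-8(%rsp)
 *	parent rip / saved %rbp			(fake frame for the parent)
 *	fentry rip / saved %rbp			(fake frame for the traced function)
 *	pt_regs save area (FRAME_SIZE bytes)	<- %rsp
 */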

/*
 * The gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to '__fentry__' instead of 'mcount'
 * and is made before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code, and restored before returning to the function.
 *
 * At boot, all of these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, a pt_regs-sized area is allocated on the
 * stack and the required mcount registers are saved in the locations
 * where pt_regs keeps them.
 */
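
/*
 * For illustration only (not part of the kernel source): with -mfentry,
 * a traced function starts with a 5-byte call that ftrace patches at
 * runtime, e.g.:
 *
 *   <foo>:
 *	call __fentry__		# becomes a 5-byte nop at boot; patched back
 *				# to "call ftrace_caller" (or
 *				# "call ftrace_regs_caller") when foo() is traced
 *	push %rbp
 *	...
 */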

/*
 * @added: the amount of stack added before calling this
 *
 * After this is called, the following registers contain:
 *
 *  %rdi - holds the address of the call to the trampoline (the traced
 *	   function's ip)
 *  %rsi - holds the parent function (traced function's return address)
 *  %rdx - holds the original %rbp
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
	/* Save the original rbp */
	pushq %rbp

	/*
	 * Stack traces will stop at the ftrace trampoline if the frame pointer
	 * is not set up properly. If fentry is used, we need to save a frame
	 * pointer for the parent as well as for the traced function, because
	 * fentry is called before the stack frame is set up, whereas mcount
	 * is called afterward.
	 */

	/* Save the parent pointer (skip orig rbp and our return address) */
	pushq \added+8*2(%rsp)
	pushq %rbp
	movq %rsp, %rbp
	/* Save the return address (now skip orig rbp, rbp and parent) */
	pushq \added+8*3(%rsp)
	pushq %rbp
	movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

	/*
	 * We add enough stack to save all regs.
	 */
	subq $(FRAME_SIZE), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	movq $0, ORIG_RAX(%rsp)
	/*
	 * Save the original RBP. Even though the mcount ABI does not
	 * require this, it helps out callers.
	 */
#ifdef CONFIG_FRAME_POINTER
	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
	movq %rbp, %rdx
#endif
	movq %rdx, RBP(%rsp)

	/* Copy the parent address into %rsi (second parameter) */
	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi

	/* Move RIP to its proper location */
	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
	movq %rdi, RIP(%rsp)

	/*
	 * Now %rdi (the first parameter) has the return address of
	 * where ftrace_call returns. But the callbacks expect the
	 * address of the call itself.
	 */
	subq $MCOUNT_INSN_SIZE, %rdi
	.endm
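
/*
 * For orientation: %rdi and %rsi set up above become the first two
 * arguments of an ftrace_func_t callback (include/linux/ftrace.h):
 *
 *	void callback(unsigned long ip, unsigned long parent_ip,
 *		      struct ftrace_ops *op, struct ftrace_regs *fregs);
 *
 * MCOUNT_INSN_SIZE is 5 (the size of the fentry call), so the subtraction
 * above turns the return address foo+5 into ip == foo, the call site itself.
 */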

/*
 * @save: bytes of the register-save area to leave on the stack
 *	  (ftrace_regs_caller uses this to keep a slot it rebuilt in
 *	  place for the popfq and RET that follow)
 */
.macro restore_mcount_regs save=0

	/* ftrace_regs_caller or frame pointers require this */
	movq RBP(%rsp), %rbp

	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax

	addq $MCOUNT_REG_SIZE-\save, %rsp

	.endm

SYM_TYPED_FUNC_START(ftrace_stub)
	CALL_DEPTH_ACCOUNT
	RET
SYM_FUNC_END(ftrace_stub)
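
/*
 * A note on CALL_DEPTH_ACCOUNT (asm/nospec-branch.h): when call depth
 * tracking is enabled (X86_FEATURE_CALL_DEPTH), calls into even these
 * otherwise-empty stubs must be accounted so that the return thunk's
 * RSB stuffing stays balanced.
 */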

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
	CALL_DEPTH_ACCOUNT
	RET
SYM_FUNC_END(ftrace_stub_graph)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)
	CALL_DEPTH_ACCOUNT
	RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)

SYM_FUNC_START(ftrace_caller)
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

	CALL_DEPTH_ACCOUNT

	/* Stack - skipping return address of ftrace_caller */
	leaq MCOUNT_REG_SIZE+8(%rsp), %rcx
	movq %rcx, RSP(%rsp)

SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx
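
	/*
	 * The ftrace_caller_op_ptr label above marks this RIP-relative
	 * load: when create_trampoline() (arch/x86/kernel/ftrace.c) makes
	 * a private copy of this code for a single ftrace_ops, it retargets
	 * the copied load at that ops instead of the global function_trace_op.
	 */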

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

	/* Only ops with REGS flag set should have CS register set */
	movq $0, CS(%rsp)

	/* Account for the function call below */
	CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	call ftrace_stub
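
	/*
	 * ftrace_call above marks the patch site: ftrace_update_ftrace_func()
	 * rewrites this "call ftrace_stub" into a call to the registered
	 * callback (e.g. the list function when several callbacks are attached).
	 */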

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE(%rsp)

	restore_mcount_regs

	/*
	 * The code up to this label is copied into trampolines so
	 * think twice before adding any new code or changing the
	 * layout here.
	 */
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	RET
SYM_FUNC_END(ftrace_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)

SYM_FUNC_START(ftrace_regs_caller)
	/* Save the current flags before any operations that can change them */
	pushfq

	/* added 8 bytes to save flags */
	save_mcount_regs 8
	/* save_mcount_regs fills in first two parameters */

	CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbx, RBX(%rsp)
	/* Copy saved flags */
	movq MCOUNT_REG_SIZE(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)
	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)
	/* Stack - skipping return address and flags */
	leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
	movq %rcx, RSP(%rsp)

	ENCODE_FRAME_POINTER
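
	/*
	 * ENCODE_FRAME_POINTER (asm/frame.h) points %rbp at the pt_regs just
	 * built, with the low bit set, so the frame-pointer unwinder can
	 * recognize and decode this as a register frame.
	 */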

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

	/* Account for the function call below */
	CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	call ftrace_stub

	/* Copy the (possibly updated) flags back into the pushfq slot */
	movq EFLAGS(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
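
	/*
	 * That slot is the fentry return address (just above the saved
	 * flags), so a handler that rewrote regs->ip (kprobes, livepatch)
	 * makes the popfq+RET path below resume at the new address instead
	 * of back in the traced function.
	 */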

	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBX(%rsp), %rbx

	movq ORIG_RAX(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE-8(%rsp)

	/*
	 * If ORIG_RAX is anything but zero, make this a call to that.
	 * See arch_ftrace_set_direct_caller().
	 */
	testq	%rax, %rax
SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	jnz	1f

	restore_mcount_regs
	/* Restore flags */
	popfq

	/*
	 * The trampoline will add the return.
	 */
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	RET

	/* Swap the flags with orig_rax */
1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
	movq %rax, MCOUNT_REG_SIZE(%rsp)
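
	/*
	 * The old save area now ends, going up, with the flags at
	 * MCOUNT_REG_SIZE-8(%rsp) and the direct caller's address at
	 * MCOUNT_REG_SIZE(%rsp) (the old pushfq slot). restore_mcount_regs 8
	 * stops right at the flags, popfq consumes them, and the final RET
	 * below jumps into the direct trampoline: a tail-call built without
	 * clobbering any register.
	 */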

	restore_mcount_regs 8
	/* Restore flags */
	popfq
	UNWIND_HINT_FUNC

	/*
	 * The above left an extra return address on the stack; effectively
	 * doing a tail-call without using a register. This PUSH;RET
	 * pattern unbalances the RSB, so inject a pointless CALL to
	 * rebalance it.
	 */
	ANNOTATE_INTRA_FUNCTION_CALL
	CALL .Ldo_rebalance
	int3
.Ldo_rebalance:
	add $8, %rsp
	ALTERNATIVE __stringify(RET), \
		    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
		    X86_FEATURE_CALL_DEPTH

SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

SYM_FUNC_START(ftrace_stub_direct_tramp)
	CALL_DEPTH_ACCOUNT
	RET
SYM_FUNC_END(ftrace_stub_direct_tramp)

#else /* ! CONFIG_DYNAMIC_FTRACE */

SYM_FUNC_START(__fentry__)
	CALL_DEPTH_ACCOUNT

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
	RET

trace:
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

	/*
	 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
	 * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
	 * ip and parent ip are used and the list function is called when
	 * function tracing is enabled.
	 */
	movq ftrace_trace_function, %r8
	CALL_NOSPEC r8
	restore_mcount_regs

	jmp ftrace_stub
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
STACK_FRAME_NON_STANDARD_FP(__fentry__)

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(return_to_handler)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, 16(%rsp)
	movq %rsp, %rdi
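
	/*
	 * %rdi now points at the 24 bytes saved above (%rax, %rdx and %rbp);
	 * ftrace_return_to_handler() reads them and hands back the original
	 * return address in %rax.
	 */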

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax

	addq $24, %rsp
	/*
	 * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
	 * since IBT would demand that the target contain ENDBR, which simply
	 * isn't so for return addresses. Use a retpoline here to keep the
	 * RSB balanced.
	 */
	ANNOTATE_INTRA_FUNCTION_CALL
	call .Ldo_rop
	int3
.Ldo_rop:
	mov %rdi, (%rsp)
	ALTERNATIVE __stringify(RET), \
		    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
		    X86_FEATURE_CALL_DEPTH
SYM_CODE_END(return_to_handler)
#endif