/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 *	MOV	X9, LR
 *	BL	<entry>
 *
 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
 * clobber.
 *
 * We save the callsite's context into a pt_regs before invoking any ftrace
 * callbacks. So that we can get a sensible backtrace, we create a stack record
 * for the callsite and the ftrace entry assembly. This is not sufficient for
 * reliable stacktrace: until we create the callsite stack record, its caller
 * is missing from the LR and existing chain of frame records.
 */
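/*
 * For illustration, a patched callsite looks roughly like this (a sketch,
 * assuming a function "func" built with -fpatchable-function-entry=2; the
 * name is hypothetical):
 *
 *	func:				func:
 *		nop			->	mov	x9, x30
 *		nop			->	bl	ftrace_regs_caller
 *		<prologue>			<prologue>
 */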
	.macro  ftrace_regs_entry, allregs=0
	/* Make room for pt_regs, plus a callee frame */
	sub	sp, sp, #(PT_REGS_SIZE + 16)

	/* Save function arguments (and x9 for simplicity) */
	stp	x0, x1, [sp, #S_X0]
	stp	x2, x3, [sp, #S_X2]
	stp	x4, x5, [sp, #S_X4]
	stp	x6, x7, [sp, #S_X6]
	stp	x8, x9, [sp, #S_X8]

	/* Optionally save the callee-saved registers, always save the FP */
	.if \allregs == 1
	stp	x10, x11, [sp, #S_X10]
	stp	x12, x13, [sp, #S_X12]
	stp	x14, x15, [sp, #S_X14]
	stp	x16, x17, [sp, #S_X16]
	stp	x18, x19, [sp, #S_X18]
	stp	x20, x21, [sp, #S_X20]
	stp	x22, x23, [sp, #S_X22]
	stp	x24, x25, [sp, #S_X24]
	stp	x26, x27, [sp, #S_X26]
	stp	x28, x29, [sp, #S_X28]
	.else
	str	x29, [sp, #S_FP]
	.endif

	/* Save the callsite's SP and LR */
	add	x10, sp, #(PT_REGS_SIZE + 16)
	stp	x9, x10, [sp, #S_LR]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #S_PC]

	/* Create a frame record for the callsite above pt_regs */
	stp	x29, x9, [sp, #PT_REGS_SIZE]
	add	x29, sp, #PT_REGS_SIZE

	/* Create our frame record within pt_regs. */
	stp	x29, x30, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME
	.endm
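
/*
 * Resulting stack layout (a sketch derived from the macro above; offsets come
 * from asm-offsets):
 *
 *	sp + PT_REGS_SIZE + 16	-> callsite's original SP
 *	sp + PT_REGS_SIZE	-> { callsite FP, callsite LR (x9) }, the
 *				   callsite's frame record
 *	sp + S_STACKFRAME	-> { FP, LR }, this entry code's frame record
 *				   (placed within pt_regs)
 *	sp			-> base of pt_regs (x0-x9, and optionally
 *				   x10-x28, saved above)
 */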

SYM_CODE_START(ftrace_regs_caller)
	bti	c
	ftrace_regs_entry	1
	b	ftrace_common
SYM_CODE_END(ftrace_regs_caller)

SYM_CODE_START(ftrace_caller)
	bti	c
	ftrace_regs_entry	0
	b	ftrace_common
SYM_CODE_END(ftrace_caller)

SYM_CODE_START(ftrace_common)
	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	ldr_l	x2, function_trace_op		// op
	mov	x3, sp				// regs
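
	/*
	 * x0-x3 match the generic ftrace callback prototype (a reference
	 * sketch from include/linux/ftrace.h of this era, not authoritative):
	 *
	 *	typedef void (*ftrace_func_t)(unsigned long ip,
	 *				      unsigned long parent_ip,
	 *				      struct ftrace_ops *op,
	 *				      struct ftrace_regs *fregs);
	 *
	 * where struct ftrace_regs wraps the pt_regs built above.
	 */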

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

/*
 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
 * to restore x0-x8, x29, and x30.
 */
ftrace_common_return:
	/* Restore function arguments */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x8, [sp, #S_X8]

	/* Restore the callsite's FP, LR, PC */
	ldr	x29, [sp, #S_FP]
	ldr	x30, [sp, #S_LR]
	ldr	x9, [sp, #S_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #PT_REGS_SIZE + 16

	ret	x9
SYM_CODE_END(ftrace_common)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
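/*
 * Reached via the patched "b ftrace_graph_caller" at ftrace_graph_call above.
 * This recovers the callsite's pc, the address of the lr saved in pt_regs, and
 * the callsite's fp, so that prepare_ftrace_return() can redirect the saved lr
 * to return_to_handler().
 */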
SYM_CODE_START(ftrace_graph_caller)
	ldr	x0, [sp, #S_PC]
	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
	ldr	x2, [sp, #PT_REGS_SIZE]		// parent fp (callsite's FP)
	bl	prepare_ftrace_return
	b	ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
#endif

#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

/*
 * GCC with -pg will put the following code at the beginning of each function:
 *      mov x0, x30
 *      bl _mcount
 *	[function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
 *
 * Note that the x0 argument is not used here, because we can get the lr (x30)
 * of the instrumented function at any time by unwinding the call stack, as
 * long as the kernel is compiled without -fomit-frame-pointer (i.e. with
 * CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */

	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm

	/* convert a return address into the address of the bl that produced it */
	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
	.endm

	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address of the instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - the tracer function, to probe the instrumented function's entry,
 *     - ftrace_graph_caller, to set up an exit hook
 */
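/*
 * In C, the body below is roughly (a sketch pieced together from the inline
 * comments; ftrace_stub is the default no-op callback):
 *
 *	if (ftrace_trace_function != ftrace_stub)
 *		ftrace_trace_function(pc, lr);
 *
 *	if (ftrace_graph_return != ftrace_stub ||
 *	    ftrace_graph_entry != ftrace_graph_entry_stub)
 *		ftrace_graph_caller();
 */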
SYM_FUNC_START(_mcount)
	mcount_enter

	ldr_l	x2, ftrace_trace_function
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);

skip_ftrace_call:			// }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr_l	x2, ftrace_graph_return
	cmp	x0, x2			//   if ((ftrace_graph_return
	b.ne	ftrace_graph_caller	//        != ftrace_stub)

	ldr_l	x2, ftrace_graph_entry	//     || (ftrace_graph_entry
	adr_l	x0, ftrace_graph_entry_stub //     != ftrace_graph_entry_stub))
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
	mcount_exit
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are initially replaced with NOPs at kernel
 * start up. Later on, a NOP is patched to branch to ftrace_caller() when
 * tracing is enabled, or back to a NOP when it is disabled, on a per-function
 * basis.
 */
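/*
 * Sketch of the per-callsite patching states described above:
 *
 *	bl _mcount		->	nop			(at boot)
 *	nop			->	bl ftrace_caller	(enabled)
 *	bl ftrace_caller	->	nop			(disabled)
 */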
SYM_FUNC_START(_mcount)
	ret
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address of the instrumented function
 *
 * This function is the counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - the tracer function, to probe the instrumented function's entry,
 *     - ftrace_graph_caller, to set up an exit hook
 */
SYM_FUNC_START(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

	mcount_exit
SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function, together with prepare_ftrace_return(), fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later, on its exit.
 */
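/*
 * For reference, prepare_ftrace_return() (arch/arm64/kernel/ftrace.c) has
 * roughly this prototype; it rewrites *parent to point at return_to_handler():
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer);
 */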
SYM_FUNC_START(ftrace_graph_caller)
	mcount_get_pc		  x0	//     function's pc
	mcount_get_lr_addr	  x1	//     pointer to function's saved lr
	mcount_get_parent_fp	  x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

SYM_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to the parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
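/*
 * For reference, the C helper (kernel/trace/fgraph.c) is roughly:
 *
 *	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 *
 * It returns the original return address stashed by prepare_ftrace_return().
 */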
SYM_CODE_START(return_to_handler)
	/* save return value regs */
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]

	mov	x0, x29			//     parent's fp
	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address

	/* restore return value regs */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64

	ret
SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
