xref: /linux/arch/arm64/kernel/entry-ftrace.S (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1/*
2 * arch/arm64/kernel/entry-ftrace.S
3 *
4 * Copyright (C) 2013 Linaro Limited
5 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/linkage.h>
13#include <asm/ftrace.h>
14#include <asm/insn.h>
15
16/*
17 * Gcc with -pg will put the following code in the beginning of each function:
18 *      mov x0, x30
19 *      bl _mcount
20 *	[function's body ...]
21 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
22 * ftrace is enabled.
23 *
24 * Please note that x0 as an argument will not be used here because we can
25 * get lr(x30) of instrumented function at any time by winding up call stack
26 * as long as the kernel is compiled without -fomit-frame-pointer.
27 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
28 *
29 * stack layout after mcount_enter in _mcount():
30 *
31 * current sp/fp =>  0:+-----+
32 * in _mcount()        | x29 | -> instrumented function's fp
33 *                     +-----+
34 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
35 * old sp       => +16:+-----+
36 * when instrumented   |     |
37 * function calls      | ... |
38 * _mcount()           |     |
39 *                     |     |
40 * instrumented => +xx:+-----+
41 * function's fp       | x29 | -> parent's fp
42 *                     +-----+
43 *                     | x30 | -> instrumented function's lr (= parent's pc)
44 *                     +-----+
45 *                     | ... |
46 */
47
48	.macro mcount_enter
49	stp	x29, x30, [sp, #-16]!	// push a frame record for _mcount
50	mov	x29, sp			// x29 = _mcount's own frame pointer
51	.endm
52
53	.macro mcount_exit
54	ldp	x29, x30, [sp], #16	// pop frame record; restore caller's x29/x30
55	ret				// return into the instrumented function
56	.endm
57
58	.macro mcount_adjust_addr rd, rn
	// \rd = \rn - AARCH64_INSN_SIZE: turn a return address into the
	// address of the "bl _mcount" call site itself (one instruction back).
59	sub	\rd, \rn, #AARCH64_INSN_SIZE
60	.endm
61
62	/* for instrumented function's parent */
63	.macro mcount_get_parent_fp reg
64	ldr	\reg, [x29]		// \reg = instrumented function's fp (see layout above)
65	ldr	\reg, [\reg]		// \reg = parent's fp
66	.endm
67
68	/* for instrumented function */
69	.macro mcount_get_pc0 reg
	// Call-site pc taken straight from x30, which still holds our lr
	// at this point (nothing has clobbered it since entry).
70	mcount_adjust_addr	\reg, x30
71	.endm
72
73	.macro mcount_get_pc reg
74	ldr	\reg, [x29, #8]		// \reg = our saved lr = instrumented function's pc
75	mcount_adjust_addr	\reg, \reg	// back up to the call site
76	.endm
77
78	.macro mcount_get_lr reg
79	ldr	\reg, [x29]		// \reg = instrumented function's fp
80	ldr	\reg, [\reg, #8]	// \reg = its saved lr (= parent's pc)
81	mcount_adjust_addr	\reg, \reg	// back up to the parent's call site
82	.endm
83
84	.macro mcount_get_lr_addr reg
85	ldr	\reg, [x29]		// \reg = instrumented function's fp
86	add	\reg, \reg, #8		// \reg = address of its saved-lr slot
87	.endm
88
89#ifndef CONFIG_DYNAMIC_FTRACE
90/*
91 * void _mcount(unsigned long return_address)
92 * @return_address: return address to instrumented function
93 *
94 * This function makes calls, if enabled, to:
95 *     - tracer function to probe instrumented function's entry,
96 *     - ftrace_graph_caller to set up an exit hook
97 */
98ENTRY(_mcount)
99	mcount_enter
100
	// Fetch the installed tracer callback; ftrace_stub is the
	// sentinel meaning "no tracer installed".
101	adrp	x0, ftrace_trace_function
102	ldr	x2, [x0, #:lo12:ftrace_trace_function]
103	adr	x0, ftrace_stub
104	cmp	x0, x2			// if (ftrace_trace_function
105	b.eq	skip_ftrace_call	//     != ftrace_stub) {
106
107	mcount_get_pc	x0		//       function's pc
108	mcount_get_lr	x1		//       function's lr (= parent's pc)
109	blr	x2			//   (*ftrace_trace_function)(pc, lr);
110
111#ifndef CONFIG_FUNCTION_GRAPH_TRACER
112skip_ftrace_call:			//   return;
113	mcount_exit			// }
114#else
115	mcount_exit			//   return;
116					// }
117skip_ftrace_call:
	// x0 still holds &ftrace_stub from the comparison above.
118	adrp	x1, ftrace_graph_return
119	ldr	x2, [x1, #:lo12:ftrace_graph_return]
120	cmp	x0, x2			//   if ((ftrace_graph_return
121	b.ne	ftrace_graph_caller	//        != ftrace_stub)
122
123	adrp	x1, ftrace_graph_entry	//     || (ftrace_graph_entry
124	adrp	x0, ftrace_graph_entry_stub //     != ftrace_graph_entry_stub))
125	ldr	x2, [x1, #:lo12:ftrace_graph_entry]
126	add	x0, x0, #:lo12:ftrace_graph_entry_stub
127	cmp	x0, x2
128	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
129
130	mcount_exit
131#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
132ENDPROC(_mcount)
133
134#else /* CONFIG_DYNAMIC_FTRACE */
135/*
136 * _mcount() is used to build the kernel with the -pg option, but all branch
137 * instructions to _mcount() are replaced with NOPs initially at kernel
138 * start up, and later each NOP is patched to a branch to ftrace_caller()
139 * when tracing is enabled, or back to a NOP when disabled, on a
140 * per-function basis.
141 */
141ENTRY(_mcount)
	// With dynamic ftrace each call site is patched directly, so the
	// default _mcount body is just a return.
142	ret
143ENDPROC(_mcount)
144
145/*
146 * void ftrace_caller(unsigned long return_address)
147 * @return_address: return address to instrumented function
148 *
149 * This function is a counterpart of _mcount() in 'static' ftrace, and
150 * makes calls to:
151 *     - tracer function to probe instrumented function's entry,
152 *     - ftrace_graph_caller to set up an exit hook
153 */
154ENTRY(ftrace_caller)
155	mcount_enter
156
157	mcount_get_pc0	x0		//     function's pc
158	mcount_get_lr	x1		//     function's lr
159
	// Runtime patch site: the nop below is rewritten when tracing
	// is enabled/disabled.
160	.global ftrace_call
161ftrace_call:				// tracer(pc, lr);
162	nop				// This will be replaced with "bl xxx"
163					// where xxx can be any kind of tracer.
164
165#ifdef CONFIG_FUNCTION_GRAPH_TRACER
166	.global ftrace_graph_call
167ftrace_graph_call:			// ftrace_graph_caller();
168	nop				// If enabled, this will be replaced
169					// with "b ftrace_graph_caller"
170#endif
171
172	mcount_exit
173ENDPROC(ftrace_caller)
174#endif /* CONFIG_DYNAMIC_FTRACE */
175
176ENTRY(ftrace_stub)
	// No-op tracer; its address also serves as the "tracing disabled"
	// sentinel that _mcount compares against.
177	ret
178ENDPROC(ftrace_stub)
179
180#ifdef CONFIG_FUNCTION_GRAPH_TRACER
181	/* save return value regs (x0-x7 may carry the traced function's result) */
182	.macro save_return_regs
183	sub sp, sp, #64			// 64 bytes keeps sp 16-byte aligned
184	stp x0, x1, [sp]
185	stp x2, x3, [sp, #16]
186	stp x4, x5, [sp, #32]
187	stp x6, x7, [sp, #48]
188	.endm
189
190	/* restore return value regs (x0-x7), mirroring save_return_regs */
191	.macro restore_return_regs
192	ldp x0, x1, [sp]
193	ldp x2, x3, [sp, #16]
194	ldp x4, x5, [sp, #32]
195	ldp x6, x7, [sp, #48]
196	add sp, sp, #64
197	.endm
198
199/*
200 * void ftrace_graph_caller(void)
201 *
202 * Called from _mcount() or ftrace_caller() when function_graph tracer is
203 * selected.
204 * This function w/ prepare_ftrace_return() fakes link register's value on
205 * the call stack in order to intercept instrumented function's return path
206 * and run return_to_handler() later on its exit.
207 */
208ENTRY(ftrace_graph_caller)
209	mcount_get_lr_addr	  x0	//     pointer to function's saved lr
210	mcount_get_pc		  x1	//     function's pc
211	mcount_get_parent_fp	  x2	//     parent's fp
212	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)
213
	// The saved lr may now point at return_to_handler (see the header
	// comment above); just unwind our own frame and return.
214	mcount_exit
215ENDPROC(ftrace_graph_caller)
216
217/*
218 * void return_to_handler(void)
219 *
220 * Run ftrace_return_to_handler() before going back to parent.
221 * @fp is checked against the value passed by ftrace_graph_caller()
222 * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
223 */
224ENTRY(return_to_handler)
225	save_return_regs		// preserve the traced function's return value
226	mov	x0, x29			//     parent's fp
227	bl	ftrace_return_to_handler// addr = ftrace_return_to_handler(fp);
228	mov	x30, x0			// restore the original return address
229	restore_return_regs
230	ret				// return to the real caller
231END(return_to_handler)
232#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
233