/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Split from ftrace_64.S
 */

#include <linux/export.h>
#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

/*
 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
 * when ftrace is active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame; B has not yet
 * had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
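/*
 * For reference, a sketch of the C-side view: the patched ftrace_call/
 * ftrace_regs_call sites below end up invoking a callback with the
 * ftrace_func_t signature from <linux/ftrace.h>:
 *
 *	void tracer(unsigned long ip, unsigned long parent_ip,
 *		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 *
 * The entry macro below marshals the arguments accordingly: r3 = ip (the
 * call site, nip - MCOUNT_INSN_SIZE), r4 = parent_ip (the original return
 * address in A), r5 = op (function_trace_op), and r6 = the pt_regs, which
 * the tracer sees as its struct ftrace_regs argument.
 */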
.macro	ftrace_regs_entry allregs
	/* Create our stack frame + pt_regs */
	PPC_STLU	r1,-SWITCH_FRAME_SIZE(r1)

	/* Save r0 and the argument gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
	/* Save the original return address in A's stack frame */
	std	r0, LRSAVE+SWITCH_FRAME_SIZE(r1)
	/* Ok to continue? */
	lbz	r3, PACA_FTRACE_ENABLED(r13)
	cmpdi	r3, 0
	beq	ftrace_no_trace
#endif

	.if \allregs == 1
	SAVE_GPR(2, r1)
	SAVE_GPRS(11, 31, r1)
	.else
#ifdef CONFIG_LIVEPATCH_64
	SAVE_GPR(14, r1)
#endif
	.endif

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE
	PPC_STL	r8, GPR1(r1)

	.if \allregs == 1
	/* Load special regs for save below */
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	mfcr	r11
	.else
	/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
	li	r8, 0
	.endif

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip */
	PPC_STL	r7, _NIP(r1)
	/* Save the original return address (r0) in pt_regs->link */
	PPC_STL	r0, _LINK(r1)

#ifdef CONFIG_PPC64
	/* Save callee's TOC in the ABI compliant location */
	std	r2, STK_GOT(r1)
	LOAD_PACA_TOC()		/* get kernel TOC in r2 */
	LOAD_REG_ADDR(r3, function_trace_op)
	ld	r5,0(r3)
#else
	lis	r3,function_trace_op@ha
	lwz	r5,function_trace_op@l(r3)
#endif

#ifdef CONFIG_LIVEPATCH_64
	mr	r14, r7		/* remember old NIP */
#endif

	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	PPC_STL	r8, _MSR(r1)
	.if \allregs == 1
	PPC_STL	r9, _CTR(r1)
	PPC_STL	r10, _XER(r1)
	PPC_STL	r11, _CCR(r1)
	.endif

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1, STACK_INT_FRAME_REGS
.endm
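/*
 * Sketch of the frame built by ftrace_regs_entry (actual offsets come
 * from asm-offsets.c for the build):
 *
 *	r1 + SWITCH_FRAME_SIZE    -> back in A's frame (its LRSAVE slot
 *	                             holds the original LR on ppc64)
 *	r1 + STACK_INT_FRAME_REGS -> struct pt_regs, passed to the tracer
 *	                             in r6
 *	r1                        -> our stack frame header
 */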

.macro	ftrace_regs_exit allregs
	/* Load ctr with the possibly modified NIP */
	PPC_LL	r3, _NIP(r1)
	mtctr	r3

#ifdef CONFIG_LIVEPATCH_64
	cmpd	r14, r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	.if \allregs == 1
	REST_GPRS(2, 31, r1)
	.else
	REST_GPRS(3, 10, r1)
#ifdef CONFIG_LIVEPATCH_64
	REST_GPR(14, r1)
#endif
	.endif

	/* Restore possibly modified LR */
	PPC_LL	r0, _LINK(r1)
	mtlr	r0

#ifdef CONFIG_PPC64
	/* Restore callee's TOC */
	ld	r2, STK_GOT(r1)
#endif

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE

#ifdef CONFIG_LIVEPATCH_64
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif
	bctr			/* jump after _mcount site */
.endm
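/*
 * On the normal (unpatched NIP) exit path: ctr points back into B just
 * after the mcount site and LR again holds the return address into A, so
 * B resumes as if only its prologue had run.
 */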

_GLOBAL(ftrace_regs_caller)
	ftrace_regs_entry 1
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 1

_GLOBAL(ftrace_caller)
	ftrace_regs_entry 0
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 0

_GLOBAL(ftrace_stub)
	blr

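/*
 * For illustration, on ppc64le with CONFIG_MPROFILE_KERNEL the compiler
 * emits at the entry of each traced function B something like:
 *
 *	B:	mflr	r0
 *		bl	_mcount
 *
 * At boot the "bl _mcount" is nopped out; when tracing is enabled it is
 * patched to "bl ftrace_caller" (or "bl ftrace_regs_caller"), possibly
 * via one of the ftrace_tramp_* trampolines at the end of this file when
 * the caller is out of direct branch range.
 */
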
#ifdef CONFIG_PPC64
/*
 * Tracing was disabled for this CPU (PACA_FTRACE_ENABLED clear): unwind
 * the partially built frame and return to B as if we were never called.
 */
ftrace_no_trace:
	mflr	r3			/* return address back into B */
	mtctr	r3
	REST_GPR(3, r1)			/* restore r3, clobbered by the check */
	addi	r1, r1, SWITCH_FRAME_SIZE	/* pop our stack frame */
	mtlr	r0			/* restore B's original LR */
	bctr
#endif

#ifdef CONFIG_LIVEPATCH_64
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0, r11 & r12 are free
	 */
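	/*
	 * Each nesting of this handler pushes a 24 byte record onto the
	 * per-task livepatch stack. With r11 as the updated top of stack:
	 *
	 *	-8(r11):  STACK_END_MAGIC, checked on the way back
	 *	-16(r11): real LR, ie. the return address into A
	 *	-24(r11): caller's TOC pointer (r2)
	 */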
livepatch_handler:
	ld	r12, PACA_THREAD_INFO(r13)

	/* Allocate 3 x 8 bytes */
	ld	r11, TI_livepatch_sp(r12)
	addi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2,  -24(r11)
	mflr	r12
	std	r12, -16(r11)

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r11)

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r11, TI_livepatch_sp(r12)

	/* Check the stack marker hasn't been trashed */
	lis	r2,  STACK_END_MAGIC@h
	ori	r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r11)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r11)
	mtlr	r12
	ld	r2,  -24(r11)

	/* Pop livepatch stack frame */
	ld	r12, PACA_THREAD_INFO(r13)
	subi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH_64 */

#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
/*
 * With no tracer patched in, _mcount simply undoes the call: branch back
 * into the caller via ctr and restore the LR value its prologue saved in r0.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
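/*
 * When graph tracing is enabled, the entry hook replaces the traced
 * function's return address with return_to_handler, so we arrive here as
 * the function "returns". ftrace_return_to_handler() (kernel/trace/fgraph.c)
 * runs the exit handlers and hands back the original return address for
 * us to jump to.
 */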
_GLOBAL(return_to_handler)
	/* need to save return values */
#ifdef CONFIG_PPC64
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	LOAD_PACA_TOC()
#else
	stwu	r1, -16(r1)
	stw	r3, 8(r1)
	stw	r4, 12(r1)
#endif

	bl	ftrace_return_to_handler
	nop

	/* The return value is the original return address */
	mtlr	r3

#ifdef CONFIG_PPC64
	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)
#else
	lwz	r3, 8(r1)
	lwz	r4, 12(r1)
	addi	r1, r1, 16
#endif

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

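/*
 * Space reserved for ftrace trampolines: when a call site is too far from
 * ftrace_caller for a direct branch, a trampoline is constructed here.
 * The .init copy is freed along with the rest of init text.
 */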
.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
	.space 32
.popsection

.pushsection ".tramp.ftrace.init","aw",@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
	.space 32
.popsection