// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
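
/*
 * MCOUNT_OFFSET_INSNS is the width, in instructions, of the module
 * call site that gets branched over when tracing is disabled; both
 * layouts are documented in the call-site comment further down.
 */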

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : bits 31..26|25..0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
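
/*
 * Worked example (hypothetical target address): for addr = 0x80123458,
 *
 *	INSN_JAL(addr) = 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff)
 *		       = 0x0c000000 | 0x00048d16
 *		       = 0x0c048d16
 *
 * The CPU rebuilds the target as (PC & ~JUMP_RANGE_MASK) | (index << 2),
 * so j/jal can only reach the current 256MB segment; that is why the
 * uasm targets below are masked with JUMP_RANGE_MASK.
 */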

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;		/* register number of $v1 */
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
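
/*
 * A minimal sketch of what the buffers above hold on CONFIG_32BIT,
 * matching the module call-site diagram further down:
 *
 *	insn_la_mcount[0]:	lui   v1, %hi(_mcount)
 *	insn_la_mcount[1]:	addiu v1, v1, %lo(_mcount)
 *	insn_jal_ftrace_caller:	jal   (ftrace_caller + 8)
 */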

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
/*
 * Patch both the branch/call instruction and its delay slot; the word
 * at ip is written first (used when disabling the call site).
 */
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

/*
 * As above, but write the delay-slot word first, so the new branch at
 * ip only goes live once its delay slot is in place (used when
 * enabling the call site).
 */
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
#endif	/* !CONFIG_64BIT */

/*
 * Details of the mcount calling site on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
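
/*
 * Worked example: with MCOUNT_OFFSET_INSNS == 5 this evaluates to
 * 0x10000000 | 5 = 0x10000005, i.e. "b 1f" with a branch offset of
 * five instructions, matching the 0x10000005/0x10000004 annotations in
 * the call-site diagram above.
 */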

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space there is no long call and a nop is
	 * enough; otherwise the long call sequence is skipped with a
	 * "b 1f" branch (see the call-site diagram above).
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
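
/*
 * Worked example of the masks above: "sw ra, 20(sp)" assembles to
 * 0xafbf0014 (opcode sw = 0x2b in bits 31..26, base sp = $29 in bits
 * 25..21, rt ra = $31 in bits 20..16, offset 0x14 in bits 15..0), so
 * (code & S_RA_SP) == S_RA_SP matches it and (code & OFFSET_MASK)
 * recovers the stack offset used below.
 */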

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra,
		unsigned long old_parent_ra, unsigned long parent_ra_addr,
		unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Move ip back past the mcount call sequence: for a module past
	 * "lui v1, hi_16bit_of_mcount" (offset 24), for the kernel past
	 * "move at, ra" (offset 16), so the scan below starts at the
	 * instruction preceding the call site.
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a
	 * non-store instruction or the "s{d,w} ra, offset(sp)"
	 * instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
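
/*
 * Illustrative prologue for the non-leaf case above (assumed shape,
 * not taken from a real build):
 *
 *	addiu	sp, sp, -32
 *	sw	ra, 28(sp)	<-- the backwards scan stops here
 *	sw	s0, 24(sp)	<-- matches S_R_SP, scan continues
 *	move	at, ra
 *	jal	_mcount
 *
 * Starting just before the call site, the scan skips the s0 store and
 * stops at the ra store, so fp + 28 holds the parent's return address.
 */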

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly.
	 * A non-leaf function does save the return address in its own
	 * stack space, so we cannot hijack it directly; we need to find
	 * the real stack address, which is done by
	 * ftrace_get_parent_ra_addr().
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function, the location of the return address will be
	 * saved to $12 for us.
	 * For a leaf function, it just puts a zero into $12, so we handle
	 * it in ftrace_graph_caller() of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	self_ra -= (MCOUNT_INSN_SIZE * insns);
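
	/*
	 * Worked example for the in-kernel case: per the call-site
	 * diagram above, self_ra points past "jal _mcount" and its delay
	 * slot, so stepping back 2 * MCOUNT_INSN_SIZE bytes lands on the
	 * jal itself, which is the address recorded in __mcount_loc.
	 */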

	if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
		*parent_ra_addr = old_parent_ra;
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long)&sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */