/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

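/*
 * Number of instructions in a module's mcount call sequence that the
 * patched-in "b 1f" must skip over; the sequence is one instruction
 * longer when -mmcount-ra-address is used on 32-bit. See the call-site
 * layouts documented later in this file, above ftrace_make_nop().
 */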
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : bits 31...26 | 25...0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)
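/*
 * A jal/j instruction can only reach within the current 256MB (1 << 28)
 * segment: the CPU keeps the upper 4 bits of the delay slot's PC and
 * fills the rest from the 26-bit instruction index shifted left by 2.
 * JUMP_RANGE_MASK keeps just those low 28 bits of a target before it is
 * handed to uasm below.
 */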

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
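/*
 * For illustration, with a hypothetical target of 0x80123458:
 * INSN_JAL(0x80123458) == 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff)
 *                      == 0x0c048d16
 * The dropped upper 4 bits are supplied by the CPU from the current PC.
 */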

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	/* Step back so the flush covers both modified instructions */
	ip -= 4;
	flush_icache_range(ip, ip + 8);
	return 0;
}
#endif

/*
 * The details of the mcount calling sites on MIPS:
 *
 * 1. For the kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

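/*
 * "b 1f" is really beq $0, $0, 1f (0x10000000): the low 16 bits hold
 * the signed branch offset in instructions, counted from the delay
 * slot, which here is exactly MCOUNT_OFFSET_INSNS.
 */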
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * In kernel space the call is a single jal, so a nop suffices;
	 * in a module it is a long-call sequence, so branch over the
	 * whole sequence instead.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32-bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions, so it is being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
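/*
 * ftrace_call labels the patchable call site inside ftrace_caller (see
 * mcount.S); re-encoding the jal there redirects traced functions to
 * the new handler.
 */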

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
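/*
 * Note these are used as bit masks ((code & mask) == mask) rather than
 * exact opcodes, so S_RA_SP matches both "sw ra, offset(sp)"
 * (0xafbfxxxx) and the 64-bit "sd ra, offset(sp)" (0xffbfxxxx).
 */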

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra,
		unsigned long old_parent_ra, unsigned long parent_ra_addr,
		unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Move ip back past the mcount call sequence, so the scan below
	 * starts at the instruction preceding it: back 24 bytes from the
	 * return address for a module (past "lui v1, hi_16bit_of_mcount"),
	 * and 16 bytes for the kernel (past "move at, ra").
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a
	 * non-store instruction or the "s{d,w} ra, offset(sp)"
	 * instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * the ra is stored, then this is a leaf function and it
		 * does not store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; a non-leaf function saves the return
	 * address in its own stack frame, so we cannot hijack it directly
	 * but must find the real stack address first, which is what
	 * ftrace_get_parent_ra_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of a non-leaf function's return address is passed to
	 * us in $12, and for a leaf function $12 simply holds zero; this
	 * is done in ftrace_graph_caller in mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */
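	/*
	 * Stepping back "insns" instructions from self_ra recovers that
	 * recorded ip: the jal _mcount itself in kernel space, or the
	 * leading "lui v1" of a module's long-call sequence (see the
	 * layout above ftrace_make_nop()).
	 */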

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */
367