/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call sites are patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/patch.h>

/*
 * The compiler-emitted profiling hook consists of
 *
 *   PUSH    {LR}
 *   BL	     __gnu_mcount_nc
 *
 * To turn this combined sequence into a NOP, we need to restore the value of
 * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
 * modified anyway, and reloading LR from memory is highly likely to be less
 * efficient.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif
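
/*
 * A traced call site thus toggles between two states (a sketch; when
 * enabled, the BL may instead target ftrace_regs_caller or a module
 * PLT entry):
 *
 *   disabled:   PUSH    {LR}          enabled:   PUSH    {LR}
 *               ADD     SP, SP, #4               BL      ftrace_caller
 */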

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

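/*
 * Code modification is driven by the ftrace core (a sketch of the
 * flow): the core calls arch_ftrace_update_code(), which runs
 * ftrace_modify_all_code() under stop_machine() so that no other CPU
 * executes a patch site while it is being rewritten; that in turn
 * calls back into ftrace_make_call(), ftrace_make_nop() and
 * ftrace_update_ftrace_func() below for the individual sites.
 */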
void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

void ftrace_caller_from_init(void);
void ftrace_regs_caller_from_init(void);

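/*
 * Call sites in .init.text may be out of range for a direct branch to
 * the regular trampolines in .text (BL reaches +/-32 MiB in ARM mode,
 * +/-16 MiB in Thumb-2), so companion trampolines emitted in .init.text
 * are used for such sites until init memory is freed.
 */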
static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
					  unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
	    system_state >= SYSTEM_FREEING_INITMEM ||
	    likely(!is_kernel_inittext(rec->ip)))
		return addr;
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
	    addr == (unsigned long)&ftrace_caller)
		return (unsigned long)&ftrace_caller_from_init;
	return (unsigned long)&ftrace_regs_caller_from_init;
}

void ftrace_arch_code_modify_prepare(void)
{
}

void ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
					 bool warn)
{
	return arm_gen_branch_link(pc, addr, warn);
}

static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

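	/*
	 * 'old' is supplied in canonical (CPU-native) instruction order;
	 * convert it to its in-memory representation so it can be compared
	 * directly with the bytes read back from the call site.
	 */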
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
				MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

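/*
 * ftrace_call and ftrace_regs_call are labels on the BL instructions
 * inside the assembly trampolines in entry-ftrace.S; repointing those
 * branches selects which tracer callback the trampolines invoke.
 */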
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func, true);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func, true);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

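/*
 * Turn the NOP at a call site back into a branch-and-link to addr. For
 * module call sites the target may be out of direct branching range,
 * in which case a PLT entry allocated by the module loader is used as
 * an intermediate hop.
 */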
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;
	unsigned long aaddr = adjust_address(rec, addr);
	struct module *mod = NULL;

#ifdef CONFIG_ARM_MODULE_PLTS
	mod = rec->arch.mod;
#endif

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, aaddr, !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!new && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		new = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);

	new = ftrace_call_replace(ip, adjust_address(rec, addr), true);

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long aaddr = adjust_address(rec, addr);
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

#ifdef CONFIG_ARM_MODULE_PLTS
	/* mod is only supplied during module loading */
	if (!mod)
		mod = rec->arch.mod;
	else
		rec->arch.mod = mod;
#endif

	old = ftrace_call_replace(ip, aaddr,
				  !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!old && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		old = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	new = ftrace_nop_replace(rec);
	/*
	 * Locations in .init.text may call __gnu_mcount_nc via a
	 * linker-emitted veneer if they are too far away from its
	 * implementation, and so validation may fail spuriously in such
	 * cases. Let's work around this by omitting those from validation.
	 */
	ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));

	return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
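/*
 * Hook the return path of a traced function: called from the mcount
 * stub with a pointer to the slot holding the saved LR, this rewrites
 * that slot so the function returns to return_to_handler, after
 * handing the original return address to function_graph_enter().
 */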
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer,
			   unsigned long stack_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

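	/*
	 * The err_out label below doubles as the fault landing pad of the
	 * __get_kernel_nofault() call further down; hanging it off the
	 * 'return' lets both early exits share one statement.
	 */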
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
err_out:
		return;

	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
		/*
		 * Usually, the stack frames are contiguous in memory but cases
		 * have been observed where the next stack frame does not live
		 * at 'frame_pointer + 4' as this code used to assume.
		 *
		 * Instead, dereference the field in the stack frame that
		 * stores the SP of the calling frame: to avoid unbounded
		 * recursion, this cannot involve any ftrace instrumented
		 * functions, so use the __get_kernel_nofault() primitive
		 * directly.
		 */
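		/*
		 * For reference, the APCS frame record assumed here is laid
		 * out as follows (a sketch; offsets relative to FP):
		 *
		 *   FP      :  saved PC
		 *   FP - 4  :  saved LR
		 *   FP - 8  :  saved SP of the calling frame
		 *   FP - 12 :  saved FP of the calling frame
		 */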
		__get_kernel_nofault(&frame_pointer,
				     (unsigned long *)(frame_pointer - 8),
				     unsigned long, err_out);
	} else {
		struct stackframe frame = {
			.fp = frame_pointer,
			.sp = stack_pointer,
			.lr = self_addr,
			.pc = self_addr,
		};
		if (unwind_frame(&frame) < 0)
			return;
		if (frame.lr != self_addr)
			parent = frame.lr_addr;
		frame_pointer = frame.sp;
	}

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

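/*
 * Flip the instruction at *callsite between a NOP and a plain branch
 * to func; e.g. enabling the graph tracer turns the NOP at
 * ftrace_graph_call (in entry-ftrace.S) into 'b ftrace_graph_caller'.
 */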
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = arm_gen_nop();
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
				     ftrace_graph_regs_caller,
				     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */