// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar for suggesting the idea,
 * Mathieu Desnoyers for suggesting postponing the modifications,
 * and Arjan van de Ven for keeping me straight and explaining
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * make the call site branch to the address just past the memory
 * table, skipping the call entirely:
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code as normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}
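
/*
 * A minimal worked example (illustrative only, values taken from the
 * disassembly above): nopping out the site in a() just redirects the
 * jmp past _mcount.
 */
#if 0
static void example_nop_replace(void)
{
	/* ip points at the .word pair holding _mcount's address */
	unsigned long ip = 0x8c01106c;
	unsigned char *nop = ftrace_nop_replace(ip);

	/*
	 * nop now holds the 4 bytes 0x8c011070, i.e. ip +
	 * MCOUNT_INSN_SIZE: the lds.l that pops pr, so the jmp @r1
	 * lands past the _mcount call once these bytes are patched in.
	 */
}
#endif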

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed; this must be called via kstop_machine,
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU,
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done with an atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */
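
/*
 * nmi_running packs two things into one atomic_t: bit 31 is
 * MOD_CODE_WRITE_FLAG and the low bits count NMIs currently inside
 * arch_ftrace_nmi_enter()/exit().  A hedged sketch of the encoding,
 * with illustrative values:
 */
#if 0
	/* the modifier takes the flag while no NMI runs: */
	atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG);
					/* nmi_running == 0x80000000 */
	/* an NMI arrives: */
	atomic_inc_return(&nmi_running);
					/* returns 0x80000001; the flag bit
					   is set, so the NMI handler writes
					   the code itself too */
#endif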

static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		/* The flag already reads as clear; nothing left to do. */
		if (old == new)
			break;

		/*
		 * Clear the flag while preserving the NMI count in the
		 * low bits; atomic_cmpxchg() returns the value it saw,
		 * so the loop exits once the flag reads as clear.
		 */
		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 *    (and the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
}

static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	/* 1) fill the IP and "code" buffers */
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	/* 2) wait out running NMIs and raise the write flag */
	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	/* 3) write the code */
	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	/* 4) clear the flag, then 5) wait for any running NMIs to finish */
	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with
	 * copy_{from,to}_kernel_nofault(), and make sure what we read is
	 * what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}
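
/*
 * A minimal usage sketch (illustrative only; 'ip', 'old' and 'new'
 * are hypothetical locals): callers build the expected and replacement
 * byte patterns and let ftrace_modify_code() verify-and-swap, with
 * three distinct failure modes:
 */
#if 0
	switch (ftrace_modify_code(ip, old, new)) {
	case 0:			/* site patched, icache flushed */
		break;
	case -EFAULT:		/* text at ip was unreadable */
	case -EINVAL:		/* live text did not match 'old' */
	case -EPERM:		/* the protected write itself failed */
		WARN_ON(1);
	}
#endif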

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
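#if 0
	/*
	 * A hedged plain-C sketch of the protected sequence below; the
	 * real code needs the __ex_table fixups because 'parent' points
	 * into the traced function's stack frame and may fault:
	 */
	old = *parent;			/* save the real return address */
	*parent = return_hooker;	/* redirect the return */
	faulted = 0;			/* the fixup path sets 1 instead */
#endif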
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"	/* old = *parent */
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"	/* *parent = return_hooker */
		"mov		#0, %1				\n\t"	/* faulted = 0 */
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"	/* fault handler */
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"	/* resume at 3: ... */
		" mov		#1, %1				\n\t"	/* ... with faulted = 1 (delay slot) */
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"	/* faults at 1:/2: branch to 4: */
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, 0, NULL))
		__raw_writel(old, parent);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */