// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nicely with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

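/*
 * Compare the two instructions at @hook_pos against @expected, or against
 * a NOP pair when @expected is NULL.  Returns -EFAULT if the text can't
 * be read and -EINVAL if it doesn't match.
 */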
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

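/*
 * Toggle the 8-byte patch site at @hook_pos between a two-instruction
 * call to @target and a pair of 32-bit NOPs.  Illustrative shape of the
 * call sequence (see make_call_t0()/make_call_ra() in asm/ftrace.h for
 * the authoritative encodings):
 *
 *	auipc	t0, hi20(target - hook_pos)
 *	jalr	t0, lo12(target - hook_pos)(t0)
 *
 * The @ra variant links via ra instead of t0; in this file it is used
 * only for the ftrace_call/ftrace_regs_call/ftrace_graph_call trampoline
 * sites.
 */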
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

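/* Turn the NOP pair at rec->ip into a call to @addr, linking via t0. */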
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

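/* Turn the call at rec->ip back into a NOP pair. */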
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't
 * hold text_mutex, which triggers a lockdep failure.  SMP isn't running,
 * so we could just poke the text directly, but it's simpler to take the
 * lock ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}

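/* Point the ftrace_call and ftrace_regs_call trampoline sites at @func. */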
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true, true);
	}

	return ret;
}

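/*
 * Rendezvous data for __ftrace_modify_code(): @command is handed to
 * ftrace_modify_all_code(), and @cpu_count tracks how many CPUs have
 * entered the stop_machine() callback.
 */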
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	local_flush_icache_all();

	return 0;
}

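/*
 * Run the ftrace command under stop_machine() so that no other CPU can
 * execute a half-patched call site; every CPU flushes its local icache
 * on the way out of __ftrace_modify_code().
 */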
void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
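/*
 * Retarget a call site from @old_addr to @addr: verify that the site
 * currently calls @old_addr, then patch in the new destination.
 */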
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.  Hook the return address
 * stored at *parent so that the traced function returns to
 * return_to_handler instead of its caller.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
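/*
 * With DYNAMIC_FTRACE_WITH_REGS, graph tracing enters through a regular
 * ftrace_ops callback; the traced function's return-address slot is the
 * ra field of the saved pt_regs.
 */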
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);
	unsigned long *parent = (unsigned long *)&regs->ra;

	prepare_ftrace_return(parent, ip, frame_pointer(regs));
}
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
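/*
 * Without DYNAMIC_FTRACE_WITH_REGS, graph tracing is switched on and off
 * by patching the dedicated ftrace_graph_call site so that it either
 * calls prepare_ftrace_return() or falls through as NOPs.
 */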
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */