// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now. That doesn't play nicely with text_mutex, so we use this
	 * flag to elide the lockdep check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

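/*
 * Check that the instructions currently at @hook_pos match @expected;
 * a NULL @expected means we expect the two-NOP padding left behind by
 * ftrace_make_nop().
 */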
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

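/*
 * Patch the site at @hook_pos: write an auipc+jalr pair calling @target
 * when @enable is set, or two NOPs otherwise. @ra selects which link
 * register the generated call clobbers (ra vs. t0).
 */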
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

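/* Turn the patchable site at rec->ip into a call to @addr (via t0). */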
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

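/* Put the two-NOP padding back at rec->ip, detaching it from the tracer. */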
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}

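/*
 * Redirect the ftrace_call site in the ftrace_caller assembly
 * trampoline to @func, so a new tracer callback takes effect at the
 * one shared call site.
 */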
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return __ftrace_modify_call((unsigned long)&ftrace_call,
				    (unsigned long)func, true, true);
}

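/*
 * Parameters handed to __ftrace_modify_code() through stop_machine():
 * @command is forwarded to ftrace_modify_all_code(), and @cpu_count
 * synchronizes the patching CPU with the CPUs waiting to flush their
 * icache.
 */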
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

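/*
 * Runs on every online CPU under stop_machine(). The last CPU to
 * arrive performs the patching; the others spin until the release
 * increment below lets them through, then flush their local icache so
 * they observe the freshly written instructions.
 */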
static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return 0;
}

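/*
 * Patch with every online CPU corralled in the stop_machine() handler
 * above, since the auipc+jalr pair can't be modified safely while
 * other CPUs may be executing it.
 */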
void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
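/*
 * Retarget a tracked call site from @old_addr to @addr, first verifying
 * that the site currently contains the expected call to @old_addr.
 */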
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
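/*
 * With DYNAMIC_FTRACE_WITH_ARGS the graph tracer enters through a
 * regular ftrace_ops callback: hook the saved return address in
 * fregs->ra, passing the saved frame pointer (s0) along.
 */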
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	prepare_ftrace_return(&fregs->ra, ip, fregs->s0);
}
#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
extern void ftrace_graph_call(void);
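/*
 * Without DYNAMIC_FTRACE_WITH_ARGS the graph tracer is toggled by
 * patching the ftrace_graph_call site in the assembly trampoline to
 * call (or skip) prepare_ftrace_return().
 */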
int ftrace_enable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */