xref: /linux/arch/riscv/kernel/ftrace.c (revision 119b1e61a769aa98e68599f44721661a4d8c55f3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/irqflags.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
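/*
 * ftrace records hold the address of the jalr in the patched auipc/jalr
 * pair.  With CALL_OPS, an 8-byte literal holding the associated
 * ftrace_ops pointer is placed in front of the pair, so the recorded
 * address is pushed past it as well.
 */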
unsigned long ftrace_call_adjust(unsigned long addr)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return addr + 8 + MCOUNT_AUIPC_SIZE;

	return addr + MCOUNT_AUIPC_SIZE;
}

unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
{
	return fentry_ip - MCOUNT_AUIPC_SIZE;
}

void arch_ftrace_update_code(int command)
{
	mutex_lock(&text_mutex);
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
	mutex_unlock(&text_mutex);
	flush_icache_all();
}

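/*
 * 'source' is the address of the auipc in the auipc/jalr pair and
 * 'target' the destination of the call.  Only the 4-byte jalr is
 * rewritten; when 'validate' is set, the auipc already in place is first
 * checked against the one recomputed from the new offset.
 */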
static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate)
{
	unsigned int call[2], offset;
	unsigned int replaced[2];

	offset = target - source;
	call[1] = to_jalr_t0(offset);

	if (validate) {
		call[0] = to_auipc_t0(offset);
		/*
		 * Read the text we want to modify;
		 * we must return -EFAULT on a read error.
		 */
		if (copy_from_kernel_nofault(replaced, (void *)source, 2 * MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced[0] != call[0]) {
			pr_err("%p: expected (%08x) but got (%08x)\n",
			       (void *)source, call[0], replaced[0]);
			return -EINVAL;
		}
	}

	/* Replace the jalr at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)(source + MCOUNT_AUIPC_SIZE), call + 1, MCOUNT_JALR_SIZE))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
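/*
 * Pick the ftrace_ops pointer to store in a call site's literal: the
 * unique ops attached to this record when there is exactly one,
 * otherwise the list iterator that walks all registered ops.
 */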
static const struct ftrace_ops *riscv64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}

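/*
 * The ops pointer lives in the aligned 8-byte literal in front of the
 * auipc/jalr pair; since rec->ip points at the jalr, the literal starts
 * 12 bytes before it.
 */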
static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, const struct ftrace_ops *ops)
{
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return patch_text_nosync((void *)literal, &ops, sizeof(ops));
}

static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, riscv64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif

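/*
 * Only the jalr is rewritten here, so the destination must share the
 * upper immediate of the auipc that ftrace_init_nop() aimed at
 * ftrace_caller.  If 'addr' is not within JALR_RANGE of that, fall back
 * to the generic trampoline at FTRACE_ADDR.
 */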
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long distance, orig_addr, pc = rec->ip - MCOUNT_AUIPC_SIZE;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	orig_addr = (unsigned long)&ftrace_caller;
	distance = addr > orig_addr ? addr - orig_addr : orig_addr - addr;
	if (distance > JALR_RANGE)
		addr = FTRACE_ADDR;

	return __ftrace_modify_call(pc, addr, false);
}

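/*
 * Disable a call site by overwriting its jalr with a 4-byte nop; the
 * auipc in front of it is left in place so the site can later be
 * re-enabled by patching only the jalr.
 */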
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 nop4 = RISCV_INSN_NOP4;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	if (patch_insn_write((void *)rec->ip, &nop4, MCOUNT_NOP4_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - MCOUNT_AUIPC_SIZE;
	unsigned int nops[2], offset;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	offset = (unsigned long) &ftrace_caller - pc;
	nops[0] = to_auipc_t0(offset);
	nops[1] = RISCV_INSN_NOP4;

	mutex_lock(&text_mutex);
	ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
	mutex_unlock(&text_mutex);

	return ret;
}

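/* Tracer invoked through the ftrace_caller trampoline when CALL_OPS is not in use. */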
ftrace_func_t ftrace_call_dest = ftrace_stub;
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	WRITE_ONCE(ftrace_call_dest, func);
	/*
	 * The data fence ensures that the update to ftrace_call_dest happens
	 * before the write to function_trace_op later in the generic ftrace
	 * code.  If the ordering is not enforced, an old ftrace_call_dest may
	 * race with loading a new function_trace_op set in
	 * ftrace_modify_all_code().
	 */
	smp_wmb();
	/*
	 * Updating ftrace does not take the stop_machine path, so irqs should
	 * not be disabled.
	 */
	WARN_ON(irqs_disabled());
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	return 0;
}

#else /* CONFIG_DYNAMIC_FTRACE */
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
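/*
 * With direct calls, the handler is picked up from the per-site ops
 * literal, so the call itself keeps targeting the trampoline at
 * FTRACE_ADDR; validate the auipc before rewriting the jalr.
 */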
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long caller = rec->ip - MCOUNT_AUIPC_SIZE;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	return __ftrace_modify_call(caller, FTRACE_ADDR, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
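/*
 * With DYNAMIC_FTRACE the graph tracer is entered as a regular ftrace
 * handler; the return address to hook is the saved ra in fregs rather
 * than a location handed to us by the mcount stub.
 */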
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long frame_pointer = arch_ftrace_regs(fregs)->s0;
	unsigned long *parent = &arch_ftrace_regs(fregs)->ra;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs))
		*parent = return_hooker;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
262