xref: /linux/arch/loongarch/kernel/ftrace_dyn.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

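/*
 * Patch the single instruction at @pc to @new. When @validate is set, first
 * check that the instruction currently at @pc is @old, so an unexpected
 * modification is reported instead of silently overwritten.
 */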
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

#ifdef CONFIG_MODULES
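/*
 * Look up the module whose text contains @addr. Returns -EINVAL (and warns)
 * if @addr does not belong to any module's text.
 */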
static inline int __get_mod(struct module **mod, unsigned long addr)
{
	preempt_disable();
	*mod = __module_text_address(addr);
	preempt_enable();

	if (WARN_ON(!(*mod)))
		return -EINVAL;

	return 0;
}

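/*
 * Pick the module's ftrace trampoline PLT entry that matches the requested
 * entry point: ftrace_caller (FTRACE_ADDR) or ftrace_regs_caller
 * (FTRACE_REGS_ADDR).
 */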
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
			IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}

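/*
 * Translate a trampoline address into the address of its PLT entry inside
 * the module, so that a bl in module text can reach it.
 */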
static unsigned long get_plt_addr(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt;

	plt = get_ftrace_plt(mod, addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
		return -EINVAL;
	}

	return (unsigned long)plt;
}
#endif

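/*
 * Redirect a live call site from @old_addr to @addr by rewriting the bl at
 * rec->ip + 4. A bl only reaches +/-128M, so when the site sits in a module
 * and the trampoline is out of range, branch via the module's ftrace PLT
 * entry instead.
 */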
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);

		old_addr = get_plt_addr(mod, old_addr);
	}
#endif

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

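/*
 * Patch the ftrace_call site in the ftrace trampoline to bl the tracer
 * function currently in use. No old-instruction validation is done since
 * the instruction at that site varies with the previously installed tracer.
 */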
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	u32 new;
	unsigned long pc;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler inserts 2 NOPs before the regular function prologue. The
 * temporary (t) registers are free to use here because the LoongArch psABI
 * makes them caller-saved.
 *
 * At runtime we replace the second nop with a bl to enable the ftrace call,
 * and replace the bl with a nop to disable it. The bl clobbers RA, so the
 * original RA value has to be saved in t0 first.
 *
 * Details are:
 *
 * | Compiled   |       Disabled         |        Enabled         |
 * +------------+------------------------+------------------------+
 * | nop        | move     t0, ra        | move     t0, ra        |
 * | nop        | nop                    | bl       ftrace_caller |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value is recovered from t0 by ftrace_regs_entry and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is harmless.
 */

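/*
 * Called once per call site when its record is created (at boot or module
 * load): turn the first of the two nops into "move t0, ra", so that enabling
 * and disabling tracing later only ever touches the second slot.
 */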
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

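/*
 * Enable tracing of a call site: replace the remaining nop at rec->ip + 4
 * with a bl to the ftrace trampoline (via the module PLT if the trampoline
 * is out of bl range).
 */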
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

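/*
 * Disable tracing of a call site: replace the bl at rec->ip + 4 with a nop,
 * validating that the expected bl (possibly via the module PLT) is still
 * there.
 */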
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

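/*
 * FTRACE_MAY_SLEEP lets the core ftrace code cond_resched() between records
 * while it walks the (potentially very long) list of call sites.
 */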
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
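/*
 * Hook the return address stored at *parent: if the graph tracer accepts
 * this function, make it return to return_to_handler so the function exit
 * can be recorded as well.
 */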
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long old;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
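/*
 * With DYNAMIC_FTRACE_WITH_ARGS the graph tracer is entered through this
 * ftrace_ops callback; regs->regs[1] is the saved $ra, which holds the
 * parent return address to be hooked.
 */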
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *parent = (unsigned long *)&regs->regs[1];

	prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
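/*
 * Without DYNAMIC_FTRACE_WITH_ARGS, the graph tracer is switched on and off
 * by patching the ftrace_graph_call site in the assembly trampoline between
 * a nop and a branch to ftrace_graph_caller.
 */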
static int ftrace_modify_graph_caller(bool enable)
{
	u32 branch, nop;
	unsigned long pc, func;
	extern void ftrace_graph_call(void);

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	nop = larch_insn_gen_nop();
	branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */