// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first checked
 * against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text: if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*(), which is
	 * built on copy_{from,to}_kernel_nofault(), and make sure what we read
	 * is what we expected it to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
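	/*
	 * aarch64_insn_patch_text_nosync() writes the new instruction and
	 * performs the cache maintenance needed for other CPUs to observe
	 * it, without any stop_machine()-style synchronization.
	 */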
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)function_nocfi(ftrace_call);
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

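	/*
	 * The instruction at ftrace_call may currently branch to any
	 * previously installed handler, so there is no single known 'old'
	 * value to validate against; skip validation.
	 */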
	return ftrace_modify_code(pc, 0, new, false);
}

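/*
 * Return the module's pre-allocated ftrace PLT entry for @addr. These
 * trampolines are installed in the module at load time so that call sites
 * that are out of direct branch range can still reach the ftrace entry
 * points.
 */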
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

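	/* A BL encodes a signed 26-bit word offset, i.e. a range of +/-128MiB. */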
	if (offset < -SZ_128M || offset >= SZ_128M) {
		struct module *mod;
		struct plt_entry *plt;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		plt = get_ftrace_plt(mod, addr);
		if (!plt) {
			pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
			return -EINVAL;
		}

		addr = (unsigned long)plt;
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
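/*
 * Redirect an existing BL from @old_addr to @addr, e.g. when a call site
 * switches between the regular and the register-saving ftrace trampolines.
 * Both the old and the new instructions are known, so validate the change.
 */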
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
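	/* rec->ip is the BL slot; the MOV goes in the NOP one instruction earlier. */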
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		u32 replaced;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

void arch_ftrace_update_code(int command)
{
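	/*
	 * Patching every call site can take a while; FTRACE_MAY_SLEEP lets
	 * ftrace_modify_all_code() reschedule between updating individual
	 * sites instead of hogging the CPU for the whole pass.
	 */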
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * from _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * Unlike some other architectures, there is no protection against
	 * faulting at *parent here; such a fault is unlikely on AArch64.
	 */
	old = *parent;

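	/*
	 * function_graph_enter() records the real return address on the
	 * return stack; only divert *parent to return_hooker if that
	 * succeeds.
	 */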
	if (!function_graph_enter(old, self_addr, frame_pointer,
	    (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */