// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, a replaced instruction is checked against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)function_nocfi(ftrace_call);
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}
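
/*
 * Illustrative sketch only (not taken from a real disassembly): with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS, a traced function <func> in a module
 * loaded more than 128M away from the kernel text is assumed to end up as:
 *
 *	<func>:
 *		mov	x9, x30
 *		bl	<ftrace trampoline in the module's PLT>
 *		...
 *
 * where the PLT entry performs an absolute branch to ftrace_caller() or
 * ftrace_regs_caller(), which a direct BL from the module cannot reach.
 */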

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		struct module *mod;
		struct plt_entry *plt;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		plt = get_ftrace_plt(mod, addr);
		if (!plt) {
			pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
			return -EINVAL;
		}

		addr = (unsigned long)plt;
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 *	| Compiled | Disabled   | Enabled    |
 *	+----------+------------+------------+
 *	| NOP      | MOV X9, LR | MOV X9, LR |
 *	| NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		u32 replaced;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}
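
/*
 * Each callsite is patched individually via aarch64_insn_patch_text_nosync()
 * above, without stopping the machine, so the core ftrace code is allowed to
 * reschedule between updates (FTRACE_MAY_SLEEP).
 */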
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */