/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is checked against
 * 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace the tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		struct plt_entry trampoline;
		struct module *mod;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		/*
		 * There is only one ftrace trampoline per module. For now,
		 * this is not a problem since on arm64, all dynamic ftrace
		 * invocations are routed via ftrace_caller(). This will need
		 * to be revisited if support for multiple ftrace entry points
		 * is added in the future, but for now, the pr_err() below
		 * deals with a theoretical issue only.
		 */
		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
				       &trampoline)) {
			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
				return -EINVAL;
			}

			/* point the trampoline to our ftrace entry point */
			module_disable_ro(mod);
			*mod->arch.ftrace_trampoline = trampoline;
			module_enable_ro(mod, true);

			/* update trampoline before patching in the branch */
			smp_wmb();
		}
		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		u32 replaced;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

/*
 * Patching call sites on arm64 does not require stop_machine(), so let
 * the core code reschedule between updates (FTRACE_MAY_SLEEP) when a
 * large number of sites is being modified.
 */
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent) on
 * the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
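 * (When HAVE_FUNCTION_GRAPH_FP_TEST is in effect, the frame pointer saved
 * on the return stack here is compared against the one seen on the way
 * back through return_to_handler, to catch a corrupted return stack.)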
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * Unlike on some other architectures, there is no protection against
	 * faulting at *parent; a fault there is unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */