/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction to be replaced is checked against
 * 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		struct plt_entry trampoline;
		struct module *mod;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		/*
		 * There is only one ftrace trampoline per module. For now,
		 * this is not a problem since on arm64, all dynamic ftrace
		 * invocations are routed via ftrace_caller(). This will need
		 * to be revisited if support for multiple ftrace entry points
		 * is added in the future, but for now, the pr_err() below
		 * deals with a theoretical issue only.
		 */
		trampoline = get_plt_entry(addr);
		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
				       &trampoline)) {
			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
					       &(struct plt_entry){})) {
				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
				return -EINVAL;
			}

			/* point the trampoline to our ftrace entry point */
			module_disable_ro(mod);
			*mod->arch.ftrace_trampoline = trampoline;
			module_enable_ro(mod, true);

			/* update trampoline before patching in the branch */
			smp_wmb();
		}
		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	}

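	/*
	 * The patch site currently holds a NOP; rewrite it into a 'bl' to
	 * 'addr', which at this point is either the real ftrace entry point
	 * or the module's PLT trampoline installed above.
	 */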
	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
		u32 replaced;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
		return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

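/*
 * Called by the core ftrace code when instruction patching is required;
 * the individual rewrites all funnel through ftrace_modify_code() above.
 */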
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Nothing to set up at boot time for dynamic ftrace on arm64. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	struct ftrace_graph_ent trace;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY)
		return;
	else
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */