/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	set_kernel_text_rw();
	ftrace_modify_all_code(*command);
	set_kernel_text_ro();

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}
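/*
 * Replace the instruction at @pc with @new. When @validate is set, the
 * existing instruction is read back first and must match @old, so a
 * call-site that is not in the expected state is left untouched. The
 * icache is flushed afterwards so the patched opcode takes effect.
 */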
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
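/*
 * Hook the return path of a traced function for the function graph
 * tracer: swap the saved return address in the parent frame for
 * return_to_handler and push the original address on the return stack.
 * The hook is undone if the entry callback declines to trace or the
 * return stack is full (-EBUSY).
 */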
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */