/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space.
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	safe_store_code(new_code2, ip + 4, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	flush_icache_range(ip, ip + 8);
	return 0;
}
#endif
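
/*
 * Worked example of the INSN_JAL() encoding above (illustrative only;
 * the target address is hypothetical).  A MIPS jal packs bits 27..2 of
 * the target into its 26-bit index field, so for a target of
 * 0x80123458:
 *
 *	(0x80123458 >> 2) & ADDR_MASK	= 0x00048d16
 *	JAL | 0x00048d16		= 0x0c048d16	(jal 0x80123458)
 *
 * The upper 4 bits of the target come from the PC of the instruction
 * in the delay slot, which is why the uasm calls above mask their
 * targets with JUMP_RANGE_MASK (a 256MB segment).
 */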

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 *	move at, ra
 *	jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	move $12, ra_address
 *	jalr v1
 *	 sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	jalr v1
 *	 nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is needed.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */
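
/*
 * Sketch of what the patching above does at a kernel calling site (the
 * addresses here are hypothetical, for illustration only):
 *
 *	80201234:	move	at, ra
 *	80201238:	jal	_mcount		<-- rec->ip
 *
 * ftrace_make_nop() rewrites the word at 80201238 to INSN_NOP (and, on
 * 32-bit, also nops the stack-adjust instruction in the delay slot);
 * ftrace_make_call() later rewrites it to "jal ftrace_caller+8"
 * (insn_jal_ftrace_caller).  A module site instead flips between
 * "b 1f" (INSN_B_1F) and "lui v1, hi16_mcount"
 * (insn_lui_v1_hi16_mcount), as described in the big comment above.
 */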

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back from the return address to just
	 * after the "lui v1, hi_16bit_of_mcount" instruction (an offset
	 * of 24); for the kernel, move to just after the "move at, ra"
	 * instruction (an offset of 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Scan the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * the ra is stored, then this is a leaf function and it
		 * does not store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
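
/*
 * Example of the scan in ftrace_get_parent_ra_addr() on a typical
 * function prologue (a sketch; the function and offsets are made up):
 *
 *	func:	addiu	sp, sp, -32
 *		sw	ra, 28(sp)	<-- matches S_RA_SP, scan stops
 *		sw	s0, 24(sp)	<-- matches S_R_SP, scan continues
 *		...			<-- mcount call site, scan starts
 *
 * Scanning backwards from the call site, the first "sw ra, offset(sp)"
 * found gives the stack slot of the saved ra, so here the function
 * returns fp + 28, provided *(fp + 28) still holds old_parent_ra.
 */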

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly.  A non-leaf function saves the return
	 * address in its own stack space, which we cannot hijack
	 * directly; we need to find the real stack address, which
	 * ftrace_get_parent_ra_addr() does for us.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is
	 * passed to us in $12, while for a leaf function $12 is zero;
	 * this is done in ftrace_graph_caller in mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra
	 * fails, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */
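
/*
 * Worked example of the dispatch above, assuming the usual MIPS syscall
 * numbering (__NR_O32_Linux = 4000): on a 64-bit kernel with
 * CONFIG_MIPS32_O32, arch_syscall_addr(4004) falls into the O32 range
 * and returns sys32_call_table[4], i.e. the O32 write(2) entry.
 */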