/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
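/*
 * Worked example of the INSN_JAL() encoding above (the target address
 * is hypothetical, chosen only for illustration): for a target of
 * 0x80100000, (0x80100000 >> 2) & ADDR_MASK == 0x00040000, so
 * INSN_JAL(0x80100000) == 0x0c040000. Since jal keeps the top four
 * bits of the PC, a call site can only reach targets inside its own
 * 256MB segment, which is why targets are reduced with
 * JUMP_RANGE_MASK before being handed to uasm below.
 */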
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

/*
 * Check if the address is in kernel space
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 *	move at, ra
 *	jal _mcount			--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	move $12, ra_address
 *	jalr v1
 *	 sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 *	lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
 *	addiu v1, v1, low_16bit_of_mcount
 *	move at, ra
 *	jalr v1
 *	 nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, there is no long call; otherwise a
	 * long call is needed.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
				    insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return value is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
				  insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back past the "lui v1, hi_16bit_of_mcount"
	 * instruction (offset is 24); for the kernel, move back past the
	 * "move at, ra" instruction (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
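/*
 * Worked example of the backward scan above, over a hypothetical
 * non-leaf prologue (the words are standard MIPS32 encodings):
 *
 *	addiu	sp, sp, -32	# 0x27bdffe0, fails the S_R_SP test
 *	sw	s0, 16(sp)	# 0xafb00010, a store, but not of ra
 *	sw	ra, 28(sp)	# 0xafbf001c, matches S_RA_SP
 *
 * Scanning backwards from the call site, "sw ra, 28(sp)" is found
 * first; its low 16 bits give the stack offset, so the saved ra lives
 * at fp + 28 and that address is returned (after the old_parent_ra
 * sanity check). Had the scan reached a non-store such as the "addiu"
 * first, the function would have been treated as a leaf.
 */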
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly. A non-leaf function saves the return address
	 * in its own stack frame, so we cannot hijack it directly but must
	 * find the real stack address; ftrace_get_parent_ra_addr() does
	 * that.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * to $12 for us, and for a leaf function a zero is placed in $12.
	 * This is done in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */
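/*
 * Worked example of the trace.func computation in prepare_ftrace_return()
 * (a sketch; the layout follows the calling-site comment earlier in this
 * file): for a kernel call site, self_ra is the return address of
 * "jal _mcount", i.e. the jal address + 8, so self_ra - 2 * MCOUNT_INSN_SIZE
 * points back at the jal itself, which is the patched instruction recorded
 * in the __mcount_loc section. For a module call site,
 * self_ra - (MCOUNT_OFFSET_INSNS + 1) * MCOUNT_INSN_SIZE likewise points
 * back at the "lui v1, hi_16bit_of_mcount" that heads the long-call
 * sequence.
 */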