// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
 * or a combination of -pg -mrecord-mcount -mnop-mcount -mfentry flags
 * (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue contains
 * only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to ftrace caller, but need a
 * trampoline (ftrace_plt), which clobbers also r1.
 */

/* Current tracer callback; the ftrace_caller stub dispatches through this. */
void *ftrace_func __read_mostly = ftrace_stub;

/* Layout of the six byte brcl/brasl instruction at a traced function entry. */
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

/*
 * Shared trampoline tail, branched to via brasl %r1 from each per-function
 * trampoline: reload r0/r1 from the trampoline data (offset 2 skips the
 * brasl opcode) and branch to the interceptor.
 * NOTE(review): exact r0/r1 contents depend on the field order of
 * struct ftrace_hotpatch_trampoline (declared in "ftrace.h", not visible
 * here) — confirm against that header.
 */
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

#ifdef CONFIG_EXPOLINE
/*
 * Expoline-safe variant: replace the indirect br with an exrl-executed
 * branch so the indirect jump is not exposed to branch target injection.
 */
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	exrl	%r0,0f\n"
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
/* Module-space copy of the shared trampoline code (see ftrace_plt_init). */
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

/*
 * Return the start (and, if @end is non-NULL, the end) of the shared
 * trampoline code to copy/branch to: the exrl variant when expoline
 * mitigation is active, the plain br variant otherwise.
 */
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

/* s390 always initializes its own nops via ftrace_init_nop below. */
bool ftrace_need_init_nop(void)
{
	return true;
}

/*
 * Initialize the ftrace nop for one record: allocate the next per-function
 * trampoline (from the vmlinux pool, or from @mod's pool for module code),
 * fill it in, and patch the displacement of the brcl at rec->ip to point at
 * the trampoline.  Returns 0 on success, -ENOMEM when the trampoline pool
 * is exhausted, -EINVAL when the code at rec->ip is not the expected
 * compiler-generated brcl 0,0.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	/* Default: vmlinux trampoline pool and the in-kernel shared code. */
	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		/* Module code must use its own pool and the ftrace_plt copy. */
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	/* brasl displacement is in halfwords, hence the division by 2. */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	/* Only the displacement is patched; the brcl 0 opcode stays a nop. */
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

/*
 * Recover the trampoline belonging to @rec by following the displacement
 * of the instruction at rec->ip and sanity-checking that the target starts
 * with the expected brasl opcode (0xc015).  Returns an ERR_PTR on fault
 * or mismatch.
 */
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	/* Displacement is encoded in halfwords. */
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

/*
 * Redirect a traced function from @old_addr to @addr by rewriting the
 * interceptor slot in its trampoline.  Fails with -EINVAL if the current
 * interceptor does not match @old_addr.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

/*
 * Flip the mask field (second byte) of a 16-bit branch opcode at @addr
 * between all ones (taken / enabled) and all zeroes (nop / disabled).
 * The current opcode must equal @expected, otherwise -EINVAL.
 */
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

/* Disable tracing for @rec: turn the taken branch back into a nop. */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

/*
 * Enable tracing for @rec: point its trampoline's interceptor at @addr
 * and turn the brcl nop at rec->ip into a taken branch.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

/* Install @func as the tracer callback dispatched from ftrace_caller. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
	return 0;
}

#ifdef CONFIG_MODULES

/*
 * Allocate a page in module space and copy the shared trampoline code
 * into it, so module trampolines can reach it with a brasl displacement.
 */
static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.  Returns return_to_handler as the new return
 * address on success, the original @ra otherwise.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * ftrace handler that emulates a kprobe hit at a traced function entry:
 * runs the pre_handler (and post_handler, if the pre_handler did not
 * claim the event) with the instruction pointer adjusted appropriately.
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	/* Guard against recursive tracing of this handler itself. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		/* Another kprobe is active; just account the miss. */
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* Present the probe address as the trap site to the handlers. */
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		/* Resume past the patched instruction. */
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

/* No single-step slot needed: the probe lives on the ftrace nop. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif