// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kmsan-checks.h>
#include <linux/kprobes.h>
#include <linux/execmem.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
 * or a combination of -pg -mrecord-mcount -mnop-mcount -mfentry flags
 * (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue contains
 * only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to ftrace caller, but need a
 * trampoline (ftrace_plt), which clobbers also r1.
 */

void *ftrace_func __read_mostly = ftrace_stub;

struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

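/*
 * Illustration only, using the struct above and the opcodes from
 * ftrace_generate_branch_insn() below; disp is a signed offset counted
 * in halfwords relative to the instruction address:
 *
 * >	brcl	0,0		{ .opc = 0xc004, .disp = 0 }
 * >	brasl	%r0,target	{ .opc = 0xc005, .disp = (target - ip) / 2 }
 *
 * The trampoline variant uses brasl %r1,... (opcode 0xc015), see
 * ftrace_init_nop() below.
 */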

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return !MACHINE_HAS_SEQ_INSN;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	struct ftrace_insn old;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
		return -EFAULT;
	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

static inline struct ftrace_insn
ftrace_generate_branch_insn(unsigned long ip, unsigned long target)
{
	/* brasl r0,target or brcl 0,0 */
	return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004,
				     .disp = target ? (target - ip) / 2 : 0 };
}

static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target,
				    unsigned long target)
{
	struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target);
	struct ftrace_insn new = ftrace_generate_branch_insn(ip, target);
	struct ftrace_insn old;

	if (!IS_ALIGNED(ip, 8))
		return -EINVAL;
	if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old)))
		return -EFAULT;
	/* Verify that the to be replaced code matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *)ip, &new, sizeof(new));
	return 0;
}

static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
					 unsigned long old_addr,
					 unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	if (MACHINE_HAS_SEQ_INSN)
		return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
	else
		return ftrace_modify_trampoline_call(rec, old_addr, addr);
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

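/*
 * Worked example, using only the opcodes that appear in the callers below:
 * ftrace_patch_branch_mask() rewrites the single byte at addr + 1, which
 * holds the condition mask nibble of a brcl/brc instruction, and leaves
 * the displacement untouched:
 *
 * >	brcl	0,target	0xc0 0x04 <disp32>	never taken (nop)
 * >	brcl	15,target	0xc0 0xf4 <disp32>	always taken
 * >	brc	0,target	0xa7 0x04 <disp16>	never taken (nop)
 * >	brc	15,target	0xa7 0xf4 <disp16>	always taken
 */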

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */
	if (MACHINE_HAS_SEQ_INSN)
		return ftrace_patch_branch_insn(rec->ip, addr, 0);
	else
		return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (MACHINE_HAS_SEQ_INSN)
		return ftrace_patch_branch_insn(rec->ip, 0, addr);
	else
		return ftrace_make_trampoline_call(rec, addr);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

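/*
 * Sketch of the resulting control flow (return_to_handler itself is
 * provided by the ftrace entry code, not by this file): when
 * function_graph_enter() accepts the entry, the original return address
 * ra is pushed on the return stack and replaced by return_to_handler,
 * so the traced function returns into the tracer first, which afterwards
 * branches back to the recorded caller.
 */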

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	/* Expect brc 0xf,... */
	return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	/* Expect brc 0x0,... */
	return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	kmsan_unpoison_memory(fregs, ftrace_regs_size());
	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif